Code example #1
    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
            duration=None, seed=None):
        AbstractPartitionableVertex.__init__(
            self, n_atoms=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=self._model_based_max_atoms_per_core)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        ReceiveBuffersToHostBasicImpl.__init__(self)
        AbstractProvidesOutgoingEdgeConstraints.__init__(self)
        PopulationSettableChangeRequiresMapping.__init__(self)

        # Store the parameters
        self._rate = rate
        self._start = start
        self._duration = duration
        self._rng = numpy.random.RandomState(seed)

        # Prepare for recording, and to get spikes
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")
Code example #2
    def __init__(self,
                 n_neurons,
                 spike_times,
                 machine_time_step,
                 spikes_per_second,
                 ring_buffer_sigma,
                 timescale_factor,
                 port=None,
                 tag=None,
                 ip_address=None,
                 board_address=None,
                 max_on_chip_memory_usage_for_spikes_in_bytes=None,
                 space_before_notification=640,
                 constraints=None,
                 label="SpikeSourceArray"):
        if ip_address is None:
            ip_address = config.get("Buffers", "receive_buffer_host")
        if port is None:
            port = config.getint("Buffers", "receive_buffer_port")

        AbstractDataSpecableVertex.__init__(
            self,
            machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractPartitionableVertex.__init__(
            self,
            n_atoms=n_neurons,
            label=label,
            max_atoms_per_core=self._model_based_max_atoms_per_core,
            constraints=constraints)
        AbstractOutgoingEdgeSameContiguousKeysRestrictor.__init__(self)
        self._spike_times = spike_times
        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification

        self.add_constraint(
            TagAllocatorRequireIptagConstraint(ip_address,
                                               port,
                                               strip_sdp=True,
                                               board_address=board_address,
                                               tag_id=tag))

        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = \
                front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise ConfigurationException(
                "The memory usage on chip is either beyond what is supportable"
                " on the spinnaker board being supported or you have requested"
                " a negative value for a memory usage. Please correct and"
                " try again")

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
Code example #3
File: spinnaker.py, Project: jackokaiser/sPyNNaker
    def _create_virtual_chip(self, virtual_vertex):
        sdram_object = SDRAM()

        # creates the two links
        spinnaker_link_id = virtual_vertex.get_spinnaker_link_id
        spinnaker_link_data = \
            self._machine.locate_connected_chips_coords_and_link(
                config.getint("Machine", "version"), spinnaker_link_id)
        virtual_link_id = (spinnaker_link_data.connected_link + 3) % 6
        to_virtual_chip_link = Link(
            destination_x=virtual_vertex.virtual_chip_x,
            destination_y=virtual_vertex.virtual_chip_y,
            source_x=spinnaker_link_data.connected_chip_x,
            source_y=spinnaker_link_data.connected_chip_y,
            multicast_default_from=virtual_link_id,
            multicast_default_to=virtual_link_id,
            source_link_id=spinnaker_link_data.connected_link)

        from_virtual_chip_link = Link(
            destination_x=spinnaker_link_data.connected_chip_x,
            destination_y=spinnaker_link_data.connected_chip_y,
            source_x=virtual_vertex.virtual_chip_x,
            source_y=virtual_vertex.virtual_chip_y,
            multicast_default_from=(spinnaker_link_data.connected_link),
            multicast_default_to=spinnaker_link_data.connected_link,
            source_link_id=virtual_link_id)

        # create the router
        links = [from_virtual_chip_link]
        router_object = MachineRouter(
            links=links,
            emergency_routing_enabled=False,
            clock_speed=MachineRouter.ROUTER_DEFAULT_CLOCK_SPEED,
            n_available_multicast_entries=sys.maxint)

        # create the processors
        processors = list()
        for virtual_core_id in range(0, 128):
            processors.append(
                Processor(virtual_core_id, Processor.CPU_AVAILABLE,
                          virtual_core_id == 0))

        # connect the real chip with the virtual one
        connected_chip = self._machine.get_chip_at(
            spinnaker_link_data.connected_chip_x,
            spinnaker_link_data.connected_chip_y)
        connected_chip.router.add_link(to_virtual_chip_link)

        # return new v chip
        return Chip(processors=processors,
                    router=router_object,
                    sdram=sdram_object,
                    x=virtual_vertex.virtual_chip_x,
                    y=virtual_vertex.virtual_chip_y,
                    virtual=True,
                    nearest_ethernet_x=None,
                    nearest_ethernet_y=None)
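A SpiNNaker chip has six inter-chip links numbered 0 to 5, and the link opposite a given one differs by 3 modulo 6; that is what (connected_link + 3) % 6 computes when the code derives the virtual chip's link id above. A self-contained sketch of that arithmetic:

def opposite_link(link_id):
    # Six links per chip (0-5); the far side of any link is 3 away mod 6
    return (link_id + 3) % 6

# e.g. a virtual chip reached over the real chip's link 4 answers on link 1
assert opposite_link(4) == 1
assert all(opposite_link(opposite_link(link)) == link for link in range(6))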
Code example #4
File: spinnaker.py, Project: ominux/sPyNNaker
    def _create_virtual_chip(self, virtual_vertex):
        """ Create a virtual chip as a real chip in the spinnmachine machine\
            object
        :param virtual_vertex: virutal vertex to convert into a real chip
        :return: the real chip
        """
        sdram_object = SDRAM()

        # creates the two links
        spinnaker_link_id = virtual_vertex.get_spinnaker_link_id
        spinnaker_link_data = \
            self._machine.locate_connected_chips_coords_and_link(
                config.getint("Machine", "version"), spinnaker_link_id)
        virtual_link_id = (spinnaker_link_data.connected_link + 3) % 6
        to_virtual_chip_link = Link(
            destination_x=virtual_vertex.virtual_chip_x,
            destination_y=virtual_vertex.virtual_chip_y,
            source_x=spinnaker_link_data.connected_chip_x,
            source_y=spinnaker_link_data.connected_chip_y,
            multicast_default_from=virtual_link_id,
            multicast_default_to=virtual_link_id,
            source_link_id=spinnaker_link_data.connected_link)

        from_virtual_chip_link = Link(
            destination_x=spinnaker_link_data.connected_chip_x,
            destination_y=spinnaker_link_data.connected_chip_y,
            source_x=virtual_vertex.virtual_chip_x,
            source_y=virtual_vertex.virtual_chip_y,
            multicast_default_from=(spinnaker_link_data.connected_link),
            multicast_default_to=spinnaker_link_data.connected_link,
            source_link_id=virtual_link_id)

        # create the router
        links = [from_virtual_chip_link]
        router_object = MachineRouter(
            links=links, emergency_routing_enabled=False,
            clock_speed=MachineRouter.ROUTER_DEFAULT_CLOCK_SPEED,
            n_available_multicast_entries=sys.maxint)

        # create the processors
        processors = list()
        for virtual_core_id in range(0, 128):
            processors.append(Processor(virtual_core_id,
                                        Processor.CPU_AVAILABLE,
                                        virtual_core_id == 0))

        # connect the real chip with the virtual one
        connected_chip = self._machine.get_chip_at(
            spinnaker_link_data.connected_chip_x,
            spinnaker_link_data.connected_chip_y)
        connected_chip.router.add_link(to_virtual_chip_link)

        # return new v chip
        return Chip(
            processors=processors, router=router_object, sdram=sdram_object,
            x=virtual_vertex.virtual_chip_x, y=virtual_vertex.virtual_chip_y,
            virtual=True, nearest_ethernet_x=None, nearest_ethernet_y=None)
Code example #5
    def __init__(
            self, n_neurons, spike_times, machine_time_step, timescale_factor,
            port=None, tag=None, ip_address=None, board_address=None,
            max_on_chip_memory_usage_for_spikes_in_bytes=None,
            space_before_notification=640,
            constraints=None, label="SpikeSourceArray"):
        if ip_address is None:
            ip_address = config.get("Buffers", "receive_buffer_host")
        if port is None:
            port = config.getint("Buffers", "receive_buffer_port")

        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractPartitionableVertex.__init__(
            self, n_atoms=n_neurons, label=label,
            max_atoms_per_core=self._model_based_max_atoms_per_core,
            constraints=constraints)
        AbstractSpikeRecordable.__init__(self)
        self._spike_times = spike_times
        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification

        self.add_constraint(TagAllocatorRequireIptagConstraint(
            ip_address, port, strip_sdp=True, board_address=board_address,
            tag_id=tag))

        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = \
                front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise ConfigurationException(
                "The memory usage on chip is either beyond what is supportable"
                " on the spinnaker board being supported or you have requested"
                " a negative value for a memory usage. Please correct and"
                " try again")

        if (self._max_on_chip_memory_usage_for_spikes <
                self._space_before_notification):
            self._space_before_notification =\
                self._max_on_chip_memory_usage_for_spikes

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
        self._spike_recording_region_size = None

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)

        # handle outgoing constraints
        self._outgoing_edge_key_restrictor = \
            OutgoingEdgeSameContiguousKeysRestrictor()
Code example #6
    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
            duration=None, seed=None):
        AbstractPartitionableVertex.__init__(
            self, n_neurons, label, self._model_based_max_atoms_per_core,
            constraints)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        PopulationSettableChangeRequiresMapping.__init__(self)

        # Store the parameters
        self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
        self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
        self._duration = utility_calls.convert_param_to_numpy(
            duration, n_neurons)
        self._rng = numpy.random.RandomState(seed)

        # Prepare for recording, and to get spikes
        self._spike_recorder = MultiSpikeRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")
        self._receive_buffer_host = config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = config.getint(
            "Buffers", "receive_buffer_port")
        self._minimum_buffer_sdram = config.getint(
            "Buffers", "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
Code example #7
    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
            duration=None, seed=None):
        AbstractPartitionableVertex.__init__(
            self, n_neurons, label, self._model_based_max_atoms_per_core,
            constraints)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        PopulationSettableChangeRequiresMapping.__init__(self)

        # Store the parameters
        self._rate = rate
        self._start = start
        self._duration = duration
        self._rng = numpy.random.RandomState(seed)

        # Prepare for recording, and to get spikes
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")
        self._receive_buffer_host = config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = config.getint(
            "Buffers", "receive_buffer_port")
        self._minimum_buffer_sdram = config.getint(
            "Buffers", "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
Code example #8
    def __init__(
            self, n_neurons, spike_times, machine_time_step, spikes_per_second,
            ring_buffer_sigma, timescale_factor, port=None, tag=None,
            ip_address=None, board_address=None,
            max_on_chip_memory_usage_for_spikes_in_bytes=None,
            constraints=None, label="SpikeSourceArray"):
        if ip_address is None:
            ip_address = config.get("Buffers", "receive_buffer_host")
        if port is None:
            port = config.getint("Buffers", "receive_buffer_port")

        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractPartitionableVertex.__init__(
            self, n_atoms=n_neurons, label=label,
            max_atoms_per_core=self._model_based_max_atoms_per_core,
            constraints=constraints)
        AbstractOutgoingEdgeSameContiguousKeysRestrictor.__init__(self)
        self._spike_times = spike_times
        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._threshold_for_reporting_bytes_written = 0

        self.add_constraint(TagAllocatorRequireIptagConstraint(
            ip_address, port, strip_sdp=True, board_address=board_address,
            tag_id=tag))

        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = 8 * 1024 * 1024

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise ConfigurationException(
                "The memory usage on chip is either beyond what is supportable"
                " on the spinnaker board being supported or you have requested"
                " a negative value for a memory usage. Please correct and"
                " try again")

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
Code example #9
    def __init__(
            self, n_neurons, spike_times, machine_time_step, timescale_factor,
            port=None, tag=None, ip_address=None, board_address=None,
            max_on_chip_memory_usage_for_spikes_in_bytes=(
                constants.SPIKE_BUFFER_SIZE_BUFFERING_IN),
            space_before_notification=640,
            constraints=None, label="SpikeSourceArray",
            spike_recorder_buffer_size=(
                constants.EIEIO_SPIKE_BUFFER_SIZE_BUFFERING_OUT),
            buffer_size_before_receive=(
                constants.EIEIO_BUFFER_SIZE_BEFORE_RECEIVE)):
        self._ip_address = ip_address
        if ip_address is None:
            self._ip_address = config.get("Buffers", "receive_buffer_host")
        self._port = port
        if port is None:
            self._port = config.getint("Buffers", "receive_buffer_port")

        ReverseIpTagMultiCastSource.__init__(
            self, n_keys=n_neurons, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor, label=label,
            constraints=constraints,
            max_atoms_per_core=(SpikeSourceArray.
                                _model_based_max_atoms_per_core),
            board_address=board_address,
            receive_port=None, receive_sdp_port=None, receive_tag=None,
            virtual_key=None, prefix=None, prefix_type=None, check_keys=False,
            send_buffer_times=spike_times,
            send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
            send_buffer_space_before_notify=space_before_notification,
            send_buffer_notification_ip_address=self._ip_address,
            send_buffer_notification_port=self._port,
            send_buffer_notification_tag=tag)
        AbstractSpikeRecordable.__init__(self)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)
        self._spike_recorder_buffer_size = spike_recorder_buffer_size
        self._buffer_size_before_receive = buffer_size_before_receive
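As with the Poisson source in example #1, this constructor is normally invoked through a Population. A minimal usage sketch under the same PyNN 0.7-era assumptions (the spike times are placeholders):

import pyNN.spiNNaker as p

p.setup(timestep=1.0)
stimulus = p.Population(
    2, p.SpikeSourceArray,
    {"spike_times": [[0.0, 10.0, 20.0], [5.0, 15.0]]},  # one list per neuron
    label="array_input")
p.run(100)
p.end()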
Code example #10
File: spinnaker.py, Project: ominux/sPyNNaker
    def __init__(self, host_name=None, timestep=None, min_delay=None,
                 max_delay=None, graph_label=None,
                 database_socket_addresses=None):
        FrontEndCommonConfigurationFunctions.__init__(self, host_name,
                                                      graph_label)
        SpynnakerConfigurationFunctions.__init__(self)
        FrontEndCommonProvenanceFunctions.__init__(self)

        self._database_socket_addresses = set()
        self._database_interface = None
        self._create_database = None
        self._populations = list()

        if self._app_id is None:
            self._set_up_main_objects(
                app_id=config.getint("Machine", "appID"),
                execute_data_spec_report=config.getboolean(
                    "Reports", "writeTextSpecs"),
                execute_partitioner_report=config.getboolean(
                    "Reports", "writePartitionerReports"),
                execute_placer_report=config.getboolean(
                    "Reports", "writePlacerReports"),
                execute_router_dat_based_report=config.getboolean(
                    "Reports", "writeRouterDatReport"),
                reports_are_enabled=config.getboolean(
                    "Reports", "reportsEnabled"),
                generate_performance_measurements=config.getboolean(
                    "Reports", "outputTimesForSections"),
                execute_router_report=config.getboolean(
                    "Reports", "writeRouterReports"),
                execute_write_reload_steps=config.getboolean(
                    "Reports", "writeReloadSteps"),
                generate_transciever_report=config.getboolean(
                    "Reports", "writeTransceiverReport"),
                execute_routing_info_report=config.getboolean(
                    "Reports", "writeRouterInfoReport"),
                in_debug_mode=config.get("Mode", "mode") == "Debug",
                generate_tag_report=config.getboolean(
                    "Reports", "writeTagAllocationReports"))

            self._set_up_pacman_algorthms_listings(
                partitioner_algorithm=config.get("Partitioner", "algorithm"),
                placer_algorithm=config.get("Placer", "algorithm"),
                key_allocator_algorithm=config.get(
                    "KeyAllocator", "algorithm"),
                routing_algorithm=config.get("Routing", "algorithm"))

            # set up executable specifics
            self._set_up_executable_specifics()
            self._set_up_report_specifics(
                default_report_file_path=config.get(
                    "Reports", "defaultReportFilePath"),
                max_reports_kept=config.getint("Reports", "max_reports_kept"),
                reports_are_enabled=config.getboolean(
                    "Reports", "reportsEnabled"),
                write_provance_data=config.getboolean(
                    "Reports", "writeProvanceData"),
                write_text_specs=config.getboolean(
                    "Reports", "writeTextSpecs"))
            self._set_up_output_application_data_specifics(
                max_application_binaries_kept=config.getint(
                    "Reports", "max_application_binaries_kept"),
                where_to_write_application_data_files=config.get(
                    "Reports", "defaultApplicationDataFilePath"))

        # set up spynnaker specifics, such as setting the machineName from conf
        self._set_up_machine_specifics(
            timestep, min_delay, max_delay, host_name)
        self._spikes_per_second = float(config.getfloat(
            "Simulation", "spikes_per_second"))
        self._ring_buffer_sigma = float(config.getfloat(
            "Simulation", "ring_buffer_sigma"))

        # Determine default executable folder location
        # and add this default to end of list of search paths
        executable_finder.add_path(os.path.dirname(model_binaries.__file__))

        FrontEndCommonInterfaceFunctions.__init__(
            self, self._reports_states, self._report_default_directory,
            self._app_data_runtime_folder)

        logger.info("Setting time scale factor to {}."
                    .format(self._time_scale_factor))

        logger.info("Setting appID to %d." % self._app_id)

        # get the machine time step
        logger.info("Setting machine time step to {} micro-seconds."
                    .format(self._machine_time_step))

        self._edge_count = 0

        # Manager of buffered sending
        self._send_buffer_manager = None
Code example #11
File: spinnaker.py, Project: SpikeFrame/sPyNNaker
    def _set_up_timings(self, timestep, min_delay, max_delay):
        self._machine_time_step = config.getint("Machine", "machineTimeStep")

        # deal with params allowed via the setup options
        if timestep is not None:

            # convert into milliseconds from microseconds
            timestep *= 1000
            self._machine_time_step = timestep

        if min_delay is not None and float(min_delay * 1000) < 1.0 * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support min delays below {} ms with the "
                "current machine time step"
                .format(constants.MIN_SUPPORTED_DELAY * timestep))

        natively_supported_delay_for_models = \
            constants.MAX_SUPPORTED_DELAY_TICS
        delay_extension_max_supported_delay = \
            constants.MAX_DELAY_BLOCKS \
            * constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK

        max_delay_tics_supported = \
            natively_supported_delay_for_models + \
            delay_extension_max_supported_delay

        if max_delay is not None\
           and float(max_delay * 1000) > max_delay_tics_supported * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support max delays above {} ms with the "
                "current machine time step".format(0.144 * timestep))
        if min_delay is not None:
            self._min_supported_delay = min_delay
        else:
            self._min_supported_delay = timestep / 1000.0

        if max_delay is not None:
            self._max_supported_delay = max_delay
        else:
            self._max_supported_delay = (max_delay_tics_supported *
                                         (timestep / 1000.0))

        if (config.has_option("Machine", "timeScaleFactor") and
                config.get("Machine", "timeScaleFactor") != "None"):
            self._time_scale_factor = \
                config.getint("Machine", "timeScaleFactor")
            if timestep * self._time_scale_factor < 1000:
                if config.getboolean(
                        "Mode", "violate_1ms_wall_clock_restriction"):
                    logger.warn(
                        "****************************************************")
                    logger.warn(
                        "*** The combination of simulation time step and  ***")
                    logger.warn(
                        "*** the machine time scale factor results in a   ***")
                    logger.warn(
                        "*** wall clock timer tick that is currently not  ***")
                    logger.warn(
                        "*** reliably supported by the spinnaker machine. ***")
                    logger.warn(
                        "****************************************************")
                else:
                    raise common_exceptions.ConfigurationException(
                        "The combination of simulation time step and the"
                        " machine time scale factor results in a wall clock "
                        "timer tick that is currently not reliably supported "
                        "by the spinnaker machine.  If you would like to "
                        "override this behaviour (at your own risk), please "
                        "add violate_1ms_wall_clock_restriction = True to the "
                        "[Mode] section of your .spynnaker.cfg file")
        else:
            self._time_scale_factor = max(1,
                                          math.ceil(1000.0 / float(timestep)))
            if self._time_scale_factor > 1:
                logger.warn("A timestep was entered that has forced sPyNNaker "
                            "to automatically slow the simulation down from "
                            "real time by a factor of {}. To remove this "
                            "automatic behaviour, please enter a "
                            "timescaleFactor value in your .spynnaker.cfg"
                            .format(self._time_scale_factor))
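When no timeScaleFactor is configured, the fallback branch above computes max(1, math.ceil(1000.0 / timestep)) with timestep already converted to microseconds, so every simulated tick takes at least 1 ms of wall-clock time. A self-contained sketch of that arithmetic:

import math

def default_time_scale_factor(timestep_ms):
    # Mirrors the fallback above: convert ms to us, then slow the
    # simulation until one timer tick spans at least 1000 us of real time
    timestep_us = timestep_ms * 1000
    return max(1, int(math.ceil(1000.0 / float(timestep_us))))

assert default_time_scale_factor(1.0) == 1   # 1 ms steps: real time
assert default_time_scale_factor(0.1) == 10  # 0.1 ms steps: slowed 10x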
Code example #12
File: spinnaker.py, Project: astoeckel/sPyNNaker
    def _create_pacman_executor_inputs(
            self, this_run_time, is_resetting=False):

        application_graph_changed = \
            self._detect_if_graph_has_changed(not is_resetting)
        inputs = list()

        # file path to store any provenance data to
        provenance_file_path = \
            os.path.join(self._report_default_directory, "provance_data")
        if not os.path.exists(provenance_file_path):
            os.mkdir(provenance_file_path)

        # all modes need the NoSyncChanges
        if application_graph_changed:
            self._no_sync_changes = 0
        inputs.append(
            {'type': "NoSyncChanges", 'value': self._no_sync_changes})

        # support resetting the machine during start up
        if (config.getboolean("Machine", "reset_machine_on_startup") and
                not self._has_ran and not is_resetting):
            inputs.append(
                {"type": "ResetMachineOnStartupFlag", 'value': True})
        else:
            inputs.append(
                {"type": "ResetMachineOnStartupFlag", 'value': False})

        # support runtime updater
        if self._has_ran and not is_resetting:
            no_machine_time_steps =\
                int((this_run_time * 1000.0) /
                    self._machine_time_step)
            inputs.append({'type': "RunTimeMachineTimeSteps",
                           'value': no_machine_time_steps})

        # FrontEndCommonPartitionableGraphApplicationDataLoader after a
        # reset and no changes
        if not self._has_ran and not application_graph_changed:
            inputs.append({
                'type': "ProcessorToAppDataBaseAddress",
                'value': self._processor_to_app_data_base_address_mapper})
            inputs.append({"type": "PlacementToAppDataFilePaths",
                           'value': self._placement_to_app_data_file_paths})
            inputs.append({'type': "WriteCheckerFlag",
                           'value': config.getboolean(
                               "Mode", "verify_writes")})

        # support resetting when there's changes in the application graph
        # (only need to exit)
        if application_graph_changed and is_resetting:
            inputs.append({"type": "MemoryTransciever", 'value': self._txrx})
            inputs.append({'type': "ExecutableTargets",
                           'value': self._executable_targets})
            inputs.append({'type': "MemoryPlacements",
                           'value': self._placements})
            inputs.append({'type': "MemoryGraphMapper",
                           'value': self._graph_mapper})
            inputs.append({'type': "APPID", 'value': self._app_id})
            inputs.append({'type': "RanToken", 'value': self._has_ran})

        elif application_graph_changed and not is_resetting:

            # make a folder for the json files to be stored in
            json_folder = os.path.join(
                self._report_default_directory, "json_files")
            if not os.path.exists(json_folder):
                os.mkdir(json_folder)

            # translate config "None" to None
            width = config.get("Machine", "width")
            height = config.get("Machine", "height")
            if width == "None":
                width = None
            else:
                width = int(width)
            if height == "None":
                height = None
            else:
                height = int(height)

            number_of_boards = config.get("Machine", "number_of_boards")
            if number_of_boards == "None":
                number_of_boards = None

            scamp_socket_addresses = config.get("Machine",
                                                "scamp_connections_data")
            if scamp_socket_addresses == "None":
                scamp_socket_addresses = None

            boot_port_num = config.get("Machine", "boot_connection_port_num")
            if boot_port_num == "None":
                boot_port_num = None
            else:
                boot_port_num = int(boot_port_num)

            inputs.append({'type': "MemoryPartitionableGraph",
                           'value': self._partitionable_graph})
            inputs.append({'type': 'ReportFolder',
                           'value': self._report_default_directory})
            inputs.append({'type': "ApplicationDataFolder",
                           'value': self._app_data_runtime_folder})
            inputs.append({'type': 'IPAddress', 'value': self._hostname})

            # basic input stuff
            inputs.append({'type': "BMPDetails",
                           'value': config.get("Machine", "bmp_names")})
            inputs.append({'type': "DownedChipsDetails",
                           'value': config.get("Machine", "down_chips")})
            inputs.append({'type': "DownedCoresDetails",
                           'value': config.get("Machine", "down_cores")})
            inputs.append({'type': "BoardVersion",
                           'value': config.getint("Machine", "version")})
            inputs.append({'type': "NumberOfBoards",
                           'value': number_of_boards})
            inputs.append({'type': "MachineWidth", 'value': width})
            inputs.append({'type': "MachineHeight", 'value': height})
            inputs.append({'type': "AutoDetectBMPFlag",
                           'value': config.getboolean("Machine",
                                                      "auto_detect_bmp")})
            inputs.append({'type': "EnableReinjectionFlag",
                           'value': config.getboolean("Machine",
                                                      "enable_reinjection")})
            inputs.append({'type': "ScampConnectionData",
                           'value': scamp_socket_addresses})
            inputs.append({'type': "BootPortNum", 'value': boot_port_num})
            inputs.append({'type': "APPID", 'value': self._app_id})
            inputs.append({'type': "RunTime", 'value': this_run_time})
            inputs.append({'type': "TimeScaleFactor",
                           'value': self._time_scale_factor})
            inputs.append({'type': "MachineTimeStep",
                           'value': self._machine_time_step})
            inputs.append({'type': "DatabaseSocketAddresses",
                           'value': self._database_socket_addresses})
            inputs.append({'type': "DatabaseWaitOnConfirmationFlag",
                           'value': config.getboolean(
                               "Database", "wait_on_confirmation")})
            inputs.append({'type': "WriteCheckerFlag",
                           'value': config.getboolean(
                               "Mode", "verify_writes")})
            inputs.append({'type': "WriteTextSpecsFlag",
                           'value': config.getboolean(
                               "Reports", "writeTextSpecs")})
            inputs.append({'type': "ExecutableFinder",
                           'value': executable_finder})
            inputs.append({'type': "MachineHasWrapAroundsFlag",
                           'value': config.getboolean(
                               "Machine", "requires_wrap_arounds")})
            inputs.append({'type': "ReportStates",
                           'value': self._reports_states})
            inputs.append({'type': "UserCreateDatabaseFlag",
                           'value': config.get("Database", "create_database")})
            inputs.append({'type': "ExecuteMapping",
                           'value': config.getboolean(
                               "Database",
                               "create_routing_info_to_neuron_id_mapping")})
            inputs.append({'type': "DatabaseSocketAddresses",
                           'value': self._database_socket_addresses})
            inputs.append({'type': "SendStartNotifications",
                           'value': config.getboolean(
                               "Database", "send_start_notification")})
            inputs.append({'type': "ProvenanceFilePath",
                           'value': provenance_file_path})

            # add paths for each file based version
            inputs.append({'type': "FileCoreAllocationsFilePath",
                           'value': os.path.join(
                               json_folder, "core_allocations.json")})
            inputs.append({'type': "FileSDRAMAllocationsFilePath",
                           'value': os.path.join(
                               json_folder, "sdram_allocations.json")})
            inputs.append({'type': "FileMachineFilePath",
                           'value': os.path.join(
                               json_folder, "machine.json")})
            inputs.append({'type': "FilePartitionedGraphFilePath",
                           'value': os.path.join(
                               json_folder, "partitioned_graph.json")})
            inputs.append({'type': "FilePlacementFilePath",
                           'value': os.path.join(
                               json_folder, "placements.json")})
            inputs.append({'type': "FileRouingPathsFilePath",
                           'value': os.path.join(
                               json_folder, "routing_paths.json")})
            inputs.append({'type': "FileConstraintsFilePath",
                           'value': os.path.join(
                               json_folder, "constraints.json")})

            if self._has_ran:
                logger.warn(
                    "The network has changed, and therefore mapping will be"
                    " done again.  Any recorded data will be erased.")
        else:
            # mapping does not need to be executed, therefore add
            # the data elements needed for the application runner and
            # runtime re-setter
            inputs.append({"type": "BufferManager",
                           "value": self._buffer_manager})
            inputs.append({'type': "DatabaseWaitOnConfirmationFlag",
                           'value': config.getboolean(
                               "Database", "wait_on_confirmation")})
            inputs.append({'type': "SendStartNotifications",
                           'value': config.getboolean(
                               "Database", "send_start_notification")})
            inputs.append({'type': "DatabaseInterface",
                           'value': self._database_interface})
            inputs.append({"type": "DatabaseSocketAddresses",
                           'value': self._database_socket_addresses})
            inputs.append({'type': "DatabaseFilePath",
                           'value': self._database_file_path})
            inputs.append({'type': "ExecutableTargets",
                           'value': self._executable_targets})
            inputs.append({'type': "APPID", 'value': self._app_id})
            inputs.append({"type": "MemoryTransciever", 'value': self._txrx})
            inputs.append({"type": "RunTime",
                           'value': this_run_time})
            inputs.append({'type': "TimeScaleFactor",
                           'value': self._time_scale_factor})
            inputs.append({'type': "LoadedReverseIPTagsToken",
                           'value': True})
            inputs.append({'type': "LoadedIPTagsToken", 'value': True})
            inputs.append({'type': "LoadedRoutingTablesToken",
                           'value': True})
            inputs.append({'type': "LoadBinariesToken", 'value': True})
            inputs.append({'type': "LoadedApplicationDataToken",
                           'value': True})
            inputs.append({'type': "MemoryPlacements",
                           'value': self._placements})
            inputs.append({'type': "MemoryGraphMapper",
                           'value': self._graph_mapper})
            inputs.append({'type': "MemoryPartitionableGraph",
                           'value': self._partitionable_graph})
            inputs.append({'type': "MemoryExtendedMachine",
                           'value': self._machine})
            inputs.append({'type': "MemoryRoutingTables",
                           'value': self._router_tables})
            inputs.append({'type': "ProvenanceFilePath",
                           'value': provenance_file_path})
            inputs.append({'type': "RanToken", 'value': self._has_ran})

        return inputs, application_graph_changed
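Every entry in the returned list is a {'type': ..., 'value': ...} dictionary, and the PACMAN executor matches entries to algorithm inputs by the type string. A minimal illustration (not the actual executor) of how such a list can be folded into a lookup table:

def index_inputs(inputs):
    # Later entries win, so a type appended twice (as
    # "DatabaseSocketAddresses" is above) keeps its last value
    return dict((item['type'], item['value']) for item in inputs)

inputs = [{'type': "APPID", 'value': 30},
          {'type': "RanToken", 'value': False}]
assert index_inputs(inputs) == {"APPID": 30, "RanToken": False}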
Code example #13
File: spinnaker.py, Project: astoeckel/sPyNNaker
    def __init__(self, host_name=None, timestep=None, min_delay=None,
                 max_delay=None, graph_label=None,
                 database_socket_addresses=None):

        self._hostname = host_name

        # update graph label if needed
        if graph_label is None:
            graph_label = "Application_graph"

        # delays parameters
        self._min_supported_delay = None
        self._max_supported_delay = None

        # pacman objects
        self._partitionable_graph = PartitionableGraph(label=graph_label)
        self._partitioned_graph = None
        self._graph_mapper = None
        self._placements = None
        self._router_tables = None
        self._routing_infos = None
        self._tags = None
        self._machine = None
        self._txrx = None
        self._reports_states = None
        self._app_id = None
        self._buffer_manager = None

        # database objects
        self._database_socket_addresses = set()
        if database_socket_addresses is not None:
            # update() adds the addresses in place; union() returns a new
            # set that would be discarded here
            self._database_socket_addresses.update(database_socket_addresses)
        self._database_interface = None
        self._create_database = None
        self._database_file_path = None

        # Determine default executable folder location
        # and add this default to end of list of search paths
        executable_finder.add_path(os.path.dirname(model_binaries.__file__))

        # population holders
        self._populations = list()
        self._projections = list()
        self._multi_cast_vertex = None
        self._edge_count = 0
        self._live_spike_recorder = dict()

        # holder for the executable targets (which we will need for reset and
        # pause and resume functionality)
        self._executable_targets = None

        # holders for data needed for reset when nothing changes in the
        # application graph
        self._processor_to_app_data_base_address_mapper = None
        self._placement_to_app_data_file_paths = None

        # holder for timing related values
        self._has_ran = False
        self._has_reset_last = False
        self._current_run_ms = 0
        self._no_machine_time_steps = None
        self._machine_time_step = None
        self._no_sync_changes = 0

        # state that's needed the first time around
        if self._app_id is None:
            self._app_id = config.getint("Machine", "appID")

            if config.getboolean("Reports", "reportsEnabled"):
                self._reports_states = ReportState(
                    config.getboolean("Reports", "writePartitionerReports"),
                    config.getboolean("Reports",
                                      "writePlacerReportWithPartitionable"),
                    config.getboolean("Reports",
                                      "writePlacerReportWithoutPartitionable"),
                    config.getboolean("Reports", "writeRouterReports"),
                    config.getboolean("Reports", "writeRouterInfoReport"),
                    config.getboolean("Reports", "writeTextSpecs"),
                    config.getboolean("Reports", "writeReloadSteps"),
                    config.getboolean("Reports", "writeTransceiverReport"),
                    config.getboolean("Reports", "outputTimesForSections"),
                    config.getboolean("Reports", "writeTagAllocationReports"))

            # set up reports default folder
            self._report_default_directory, this_run_time_string = \
                helpful_functions.set_up_report_specifics(
                    default_report_file_path=config.get(
                        "Reports", "defaultReportFilePath"),
                    max_reports_kept=config.getint(
                        "Reports", "max_reports_kept"),
                    app_id=self._app_id)

            # set up application report folder
            self._app_data_runtime_folder = \
                helpful_functions.set_up_output_application_data_specifics(
                    max_application_binaries_kept=config.getint(
                        "Reports", "max_application_binaries_kept"),
                    where_to_write_application_data_files=config.get(
                        "Reports", "defaultApplicationDataFilePath"),
                    app_id=self._app_id,
                    this_run_time_string=this_run_time_string)

        self._spikes_per_second = float(config.getfloat(
            "Simulation", "spikes_per_second"))
        self._ring_buffer_sigma = float(config.getfloat(
            "Simulation", "ring_buffer_sigma"))

        # set up machine targeted data
        self._set_up_machine_specifics(timestep, min_delay, max_delay,
                                       host_name)

        logger.info("Setting time scale factor to {}."
                    .format(self._time_scale_factor))

        logger.info("Setting appID to %d." % self._app_id)

        # get the machine time step
        logger.info("Setting machine time step to {} micro-seconds."
                    .format(self._machine_time_step))
Code example #14
File: spinnaker.py, Project: astoeckel/sPyNNaker
    def _set_up_machine_specifics(self, timestep, min_delay, max_delay,
                                  hostname):
        self._machine_time_step = config.getint("Machine", "machineTimeStep")

        # deal with params allowed via the setup options
        if timestep is not None:

            # convert into milliseconds from microseconds
            timestep *= 1000
            self._machine_time_step = timestep

        if min_delay is not None and float(min_delay * 1000) < 1.0 * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support min delays below {} ms with the "
                "current machine time step"
                .format(constants.MIN_SUPPORTED_DELAY * timestep))

        natively_supported_delay_for_models = \
            constants.MAX_SUPPORTED_DELAY_TICS
        delay_extension_max_supported_delay = \
            constants.MAX_DELAY_BLOCKS \
            * constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK

        max_delay_tics_supported = \
            natively_supported_delay_for_models + \
            delay_extension_max_supported_delay

        if max_delay is not None\
           and float(max_delay * 1000) > max_delay_tics_supported * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support max delays above {} ms with the "
                "current machine time step".format(0.144 * timestep))
        if min_delay is not None:
            self._min_supported_delay = min_delay
        else:
            self._min_supported_delay = timestep / 1000.0

        if max_delay is not None:
            self._max_supported_delay = max_delay
        else:
            self._max_supported_delay = (max_delay_tics_supported *
                                         (timestep / 1000.0))

        if (config.has_option("Machine", "timeScaleFactor") and
                config.get("Machine", "timeScaleFactor") != "None"):
            self._time_scale_factor = \
                config.getint("Machine", "timeScaleFactor")
            if timestep * self._time_scale_factor < 1000:
                logger.warn("the combination of machine time step and the "
                            "machine time scale factor results in a real "
                            "timer tick that is currently not reliably "
                            "supported by the spinnaker machine.")
        else:
            self._time_scale_factor = max(1,
                                          math.ceil(1000.0 / float(timestep)))
            if self._time_scale_factor > 1:
                logger.warn("A timestep was entered that has forced pacman103 "
                            "to automatically slow the simulation down from "
                            "real time by a factor of {}. To remove this "
                            "automatic behaviour, please enter a "
                            "timescaleFactor value in your .pacman.cfg"
                            .format(self._time_scale_factor))

        if hostname is not None:
            self._hostname = hostname
            logger.warn("The machine name from PYNN setup is overriding the "
                        "machine name defined in the spynnaker.cfg file")
        elif config.has_option("Machine", "machineName"):
            self._hostname = config.get("Machine", "machineName")
        else:
            raise Exception("A SpiNNaker machine must be specified in "
                            "spynnaker.cfg.")
        use_virtual_board = config.getboolean("Machine", "virtual_board")
        if self._hostname == 'None' and not use_virtual_board:
            raise Exception("A SpiNNaker machine must be specified in "
                            "spynnaker.cfg.")
Code example #15
    def __init__(self,
                 n_neurons,
                 binary,
                 label,
                 max_atoms_per_core,
                 machine_time_step,
                 timescale_factor,
                 spikes_per_second,
                 ring_buffer_sigma,
                 incoming_spike_buffer_size,
                 model_name,
                 neuron_model,
                 input_type,
                 synapse_type,
                 threshold_type,
                 additional_input=None,
                 constraints=None):

        AbstractPartitionableVertex.__init__(self, n_neurons, label,
                                             max_atoms_per_core, constraints)
        AbstractDataSpecableVertex.__init__(self, machine_time_step,
                                            timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractVRecordable.__init__(self)
        AbstractGSynRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        AbstractProvidesIncomingPartitionConstraints.__init__(self)
        AbstractPopulationInitializable.__init__(self)
        AbstractPopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)

        self._binary = binary
        self._label = label
        self._machine_time_step = machine_time_step
        self._timescale_factor = timescale_factor
        self._incoming_spike_buffer_size = incoming_spike_buffer_size
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self._model_name = model_name
        self._neuron_model = neuron_model
        self._input_type = input_type
        self._threshold_type = threshold_type
        self._additional_input = additional_input

        # Set up for recording
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._v_recorder = VRecorder(machine_time_step)
        self._gsyn_recorder = GsynRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint("Buffers",
                                                    "spike_buffer_size")
        self._v_buffer_max_size = config.getint("Buffers", "v_buffer_size")
        self._gsyn_buffer_max_size = config.getint("Buffers",
                                                   "gsyn_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = config.getint("Buffers",
                                                  "receive_buffer_port")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")

        # Set up synapse handling
        self._synapse_manager = SynapticManager(synapse_type,
                                                machine_time_step,
                                                ring_buffer_sigma,
                                                spikes_per_second)

        # bool for if state has changed.
        self._change_requires_mapping = True
Code example #16
    def set_recording_spikes(self):
        ip_address = config.get("Buffers", "receive_buffer_host")
        port = config.getint("Buffers", "receive_buffer_port")
        self.set_buffering_output(ip_address, port)
        self._spike_recorder.record = True
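The config object used throughout these examples is a ConfigParser-style reader over the user's .spynnaker.cfg. A self-contained sketch of the lookups this method performs, with illustrative contents (the section and option names are the ones the code reads; the host and port values are placeholders):

# Python 3 sketch; the original code base is Python 2, where the same
# class lives in the ConfigParser module
from configparser import ConfigParser

cfg_text = """
[Buffers]
receive_buffer_host = 0.0.0.0
receive_buffer_port = 17896
"""

config = ConfigParser()
config.read_string(cfg_text)
ip_address = config.get("Buffers", "receive_buffer_host")
port = config.getint("Buffers", "receive_buffer_port")
assert (ip_address, port) == ("0.0.0.0", 17896)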
Code example #17
    def __init__(
        self,
        n_neurons,
        machine_time_step,
        timescale_factor,
        spike_times=None,
        port=None,
        tag=None,
        ip_address=None,
        board_address=None,
        max_on_chip_memory_usage_for_spikes_in_bytes=(constants.SPIKE_BUFFER_SIZE_BUFFERING_IN),
        space_before_notification=640,
        constraints=None,
        label="SpikeSourceArray",
        spike_recorder_buffer_size=(constants.EIEIO_SPIKE_BUFFER_SIZE_BUFFERING_OUT),
        buffer_size_before_receive=(constants.EIEIO_BUFFER_SIZE_BEFORE_RECEIVE),
    ):
        self._ip_address = ip_address
        if ip_address is None:
            self._ip_address = config.get("Buffers", "receive_buffer_host")
        self._port = port
        if port is None:
            self._port = config.getint("Buffers", "receive_buffer_port")
        if spike_times is None:
            spike_times = []

        ReverseIpTagMultiCastSource.__init__(
            self,
            n_keys=n_neurons,
            machine_time_step=machine_time_step,
            timescale_factor=timescale_factor,
            label=label,
            constraints=constraints,
            max_atoms_per_core=(SpikeSourceArray._model_based_max_atoms_per_core),
            board_address=board_address,
            receive_port=None,
            receive_sdp_port=None,
            receive_tag=None,
            virtual_key=None,
            prefix=None,
            prefix_type=None,
            check_keys=False,
            send_buffer_times=spike_times,
            send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
            send_buffer_space_before_notify=space_before_notification,
            send_buffer_notification_ip_address=self._ip_address,
            send_buffer_notification_port=self._port,
            send_buffer_notification_tag=tag,
        )
        AbstractSpikeRecordable.__init__(self)
        AbstractProvidesOutgoingEdgeConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractMappable.__init__(self)
        AbstractHasFirstMachineTimeStep.__init__(self)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)
        self._spike_recorder_buffer_size = spike_recorder_buffer_size
        self._buffer_size_before_receive = buffer_size_before_receive

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
        self._spike_recording_region_size = None
        self._partitioned_vertices = list()
        self._partitioned_vertices_current_max_buffer_size = dict()

        # used for reset and rerun
        self._requires_mapping = True
        self._last_runtime_position = 0

        self._max_on_chip_memory_usage_for_spikes = max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification
        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise exceptions.ConfigurationException(
                "The memory usage on chip is either beyond what is supportable"
                " on the spinnaker board being supported or you have requested"
                " a negative value for a memory usage. Please correct and"
                " try again"
            )

        if self._max_on_chip_memory_usage_for_spikes < self._space_before_notification:
            self._space_before_notification = self._max_on_chip_memory_usage_for_spikes
Code example #18
    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            spike_times=None, port=None, tag=None, ip_address=None,
            board_address=None, max_on_chip_memory_usage_for_spikes_in_bytes=(
                constants.SPIKE_BUFFER_SIZE_BUFFERING_IN),
            space_before_notification=640,
            constraints=None, label="SpikeSourceArray",
            spike_recorder_buffer_size=(
                constants.EIEIO_SPIKE_BUFFER_SIZE_BUFFERING_OUT),
            buffer_size_before_receive=(
                constants.EIEIO_BUFFER_SIZE_BEFORE_RECEIVE)):
        self._ip_address = ip_address
        if ip_address is None:
            self._ip_address = config.get("Buffers", "receive_buffer_host")
        self._port = port
        if port is None:
            self._port = config.getint("Buffers", "receive_buffer_port")
        if spike_times is None:
            spike_times = []
        self._minimum_sdram_for_buffering = config.getint(
            "Buffers", "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")

        ReverseIpTagMultiCastSource.__init__(
            self, n_keys=n_neurons, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor, label=label,
            constraints=constraints,
            max_atoms_per_core=(SpikeSourceArray.
                                _model_based_max_atoms_per_core),
            board_address=board_address,
            receive_port=None, receive_sdp_port=None, receive_tag=None,
            virtual_key=None, prefix=None, prefix_type=None, check_keys=False,
            send_buffer_times=spike_times,
            send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
            send_buffer_space_before_notify=space_before_notification,
            send_buffer_notification_ip_address=self._ip_address,
            send_buffer_notification_port=self._port,
            send_buffer_notification_tag=tag)

        AbstractSpikeRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        AbstractHasFirstMachineTimeStep.__init__(self)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)
        self._spike_recorder_buffer_size = spike_recorder_buffer_size
        self._buffer_size_before_receive = buffer_size_before_receive

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
        self._spike_recording_region_size = None
        self._partitioned_vertices = list()
        self._partitioned_vertices_current_max_buffer_size = dict()

        # used for reset and rerun
        self._requires_mapping = True
        self._last_runtime_position = 0

        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification
        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = \
                front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise exceptions.ConfigurationException(
                "The memory usage on chip is either beyond what is supportable"
                " on the spinnaker board being supported or you have requested"
                " a negative value for a memory usage. Please correct and"
                " try again")

        if (self._max_on_chip_memory_usage_for_spikes <
                self._space_before_notification):
            self._space_before_notification =\
                self._max_on_chip_memory_usage_for_spikes
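
The final two statements clamp the notification threshold so it can never exceed the total buffer space. A compact equivalent of that check, assuming both values are plain integers:

def clamp_notification_space(space_before_notification,
                             max_on_chip_memory_usage_for_spikes):
    # same effect as the if-statement above: notify no later than the
    # total buffered space allows
    return min(space_before_notification,
               max_on_chip_memory_usage_for_spikes)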
Code example #19
File: spinnaker.py Project: jackokaiser/sPyNNaker
    def __init__(self,
                 host_name=None,
                 timestep=None,
                 min_delay=None,
                 max_delay=None,
                 graph_label=None,
                 database_socket_addresses=None):
        FrontEndCommonConfigurationFunctions.__init__(self, host_name,
                                                      graph_label)
        SpynnakerConfigurationFunctions.__init__(self)
        FrontEndCommonProvenanceFunctions.__init__(self)

        self._database_socket_addresses = set()
        self._database_interface = None
        self._create_database = None
        self._populations = list()

        if self._app_id is None:
            self._set_up_main_objects(
                app_id=config.getint("Machine", "appID"),
                execute_data_spec_report=config.getboolean(
                    "Reports", "writeTextSpecs"),
                execute_partitioner_report=config.getboolean(
                    "Reports", "writePartitionerReports"),
                execute_placer_report=config.getboolean(
                    "Reports", "writePlacerReports"),
                execute_router_dat_based_report=config.getboolean(
                    "Reports", "writeRouterDatReport"),
                reports_are_enabled=config.getboolean("Reports",
                                                      "reportsEnabled"),
                generate_performance_measurements=config.getboolean(
                    "Reports", "outputTimesForSections"),
                execute_router_report=config.getboolean(
                    "Reports", "writeRouterReports"),
                execute_write_reload_steps=config.getboolean(
                    "Reports", "writeReloadSteps"),
                generate_transciever_report=config.getboolean(
                    "Reports", "writeTransceiverReport"),
                execute_routing_info_report=config.getboolean(
                    "Reports", "writeRouterInfoReport"),
                in_debug_mode=config.get("Mode", "mode") == "Debug",
                generate_tag_report=config.getboolean(
                    "Reports", "writeTagAllocationReports"))

            self._set_up_pacman_algorthms_listings(
                partitioner_algorithm=config.get("Partitioner", "algorithm"),
                placer_algorithm=config.get("Placer", "algorithm"),
                key_allocator_algorithm=config.get("KeyAllocator",
                                                   "algorithm"),
                routing_algorithm=config.get("Routing", "algorithm"))

            # set up executable specifics
            self._set_up_executable_specifics()
            self._set_up_report_specifics(
                default_report_file_path=config.get("Reports",
                                                    "defaultReportFilePath"),
                max_reports_kept=config.getint("Reports", "max_reports_kept"),
                reports_are_enabled=config.getboolean("Reports",
                                                      "reportsEnabled"),
                write_provance_data=config.getboolean("Reports",
                                                      "writeProvanceData"),
                write_text_specs=config.getboolean("Reports",
                                                   "writeTextSpecs"))
            self._set_up_output_application_data_specifics(
                max_application_binaries_kept=config.getint(
                    "Reports", "max_application_binaries_kept"),
                where_to_write_application_data_files=config.get(
                    "Reports", "defaultApplicationDataFilePath"))

        # set up spynnaker specifics, such as setting the machineName from conf
        self._set_up_machine_specifics(timestep, min_delay, max_delay,
                                       host_name)
        self._spikes_per_second = config.getfloat(
            "Simulation", "spikes_per_second")
        self._ring_buffer_sigma = config.getfloat(
            "Simulation", "ring_buffer_sigma")

        # Determine default executable folder location
        # and add this default to end of list of search paths
        executable_finder.add_path(os.path.dirname(model_binaries.__file__))

        FrontEndCommonInterfaceFunctions.__init__(
            self, self._reports_states, self._report_default_directory,
            self._app_data_runtime_folder)

        logger.info("Setting time scale factor to {}.".format(
            self._time_scale_factor))

        logger.info("Setting appID to %d." % self._app_id)

        # get the machine time step
        logger.info("Setting machine time step to {} micro-seconds.".format(
            self._machine_time_step))

        self._edge_count = 0

        # Manager of buffered sending
        self._send_buffer_manager = None
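
Most of the report switches above are read with config.getboolean, which in the standard ConfigParser accepts 1/yes/true/on and 0/no/false/off, case-insensitively. A small sketch with an invented [Reports] section:

from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[Reports]
reportsEnabled = True
writeTextSpecs = no
""")

print(config.getboolean("Reports", "reportsEnabled"))  # True
print(config.getboolean("Reports", "writeTextSpecs"))  # False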
Code example #20
File: board_test.py Project: ruthvik92/PyNNExamples
height = config.get("Machine", "height")
if width == "None":
    width = None
else:
    width = int(width)
if height == "None":
    height = None
else:
    height = int(height)

number_of_boards = config.get("Machine", "number_of_boards")
if number_of_boards == "None":
    number_of_boards = None

hostname = config.get("Machine", "machineName")
board_version = config.getint("Machine", "version")

interfacer = FrontEndCommonMachineInterfacer()
interfacer_results = interfacer(
    hostname=hostname,
    bmp_details=config.get("Machine", "bmp_names"),
    downed_chips=config.get("Machine", "down_chips"),
    downed_cores=config.get("Machine", "down_cores"),
    board_version=board_version,
    number_of_boards=number_of_boards, width=width, height=height,
    auto_detect_bmp=config.getboolean("Machine", "auto_detect_bmp"),
    enable_reinjection=config.getboolean("Machine", "enable_reinjection"),
    scamp_connection_data=None, boot_port_num=None,
    reset_machine_on_start_up=config.get(
        "Machine", "reset_machine_on_startup"))
Code example #21
    def __init__(
            self, n_neurons, binary, label, max_atoms_per_core,
            machine_time_step, timescale_factor, spikes_per_second,
            ring_buffer_sigma, model_name, neuron_model, input_type,
            synapse_type, threshold_type, additional_input=None,
            constraints=None):

        ReceiveBuffersToHostBasicImpl.__init__(self)
        AbstractPartitionableVertex.__init__(
            self, n_neurons, label, max_atoms_per_core, constraints)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step, timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractVRecordable.__init__(self)
        AbstractGSynRecordable.__init__(self)
        AbstractProvidesOutgoingEdgeConstraints.__init__(self)
        AbstractProvidesIncomingEdgeConstraints.__init__(self)
        AbstractPopulationInitializable.__init__(self)
        AbstractPopulationSettable.__init__(self)
        AbstractMappable.__init__(self)

        self._binary = binary
        self._label = label
        self._machine_time_step = machine_time_step
        self._timescale_factor = timescale_factor

        self._model_name = model_name
        self._neuron_model = neuron_model
        self._input_type = input_type
        self._threshold_type = threshold_type
        self._additional_input = additional_input

        # Set up for recording
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._v_recorder = VRecorder(machine_time_step)
        self._gsyn_recorder = GsynRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._v_buffer_max_size = config.getint(
            "Buffers", "v_buffer_size")
        self._gsyn_buffer_max_size = config.getint(
            "Buffers", "gsyn_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            synapse_type, machine_time_step, ring_buffer_sigma,
            spikes_per_second)

        # Get buffering information for later use
        self._receive_buffer_host = config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = config.getint(
            "Buffers", "receive_buffer_port")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")

        # flag indicating whether a state change requires re-mapping
        self._change_requires_mapping = True
Code example #22
File: spinnaker.py Project: tophensen/sPyNNaker
    def _create_pacman_executor_inputs(self):

        # make a folder for the json files to be stored in
        json_folder = os.path.join(
            self._report_default_directory, "json_files")
        if not os.path.exists(json_folder):
            os.mkdir(json_folder)

        # file path to store any provenance data to
        provenance_file_path = os.path.join(self._report_default_directory,
                                            "provance_data")
        if not os.path.exists(provenance_file_path):
            os.mkdir(provenance_file_path)

        # translate config "None" to None
        width = config.get("Machine", "width")
        height = config.get("Machine", "height")
        if width == "None":
            width = None
        else:
            width = int(width)
        if height == "None":
            height = None
        else:
            height = int(height)

        number_of_boards = config.get("Machine", "number_of_boards")
        if number_of_boards == "None":
            number_of_boards = None

        scamp_socket_addresses = config.get(
            "Machine", "scamp_connections_data")
        if scamp_socket_addresses == "None":
            scamp_socket_addresses = None

        boot_port_num = config.get("Machine", "boot_connection_port_num")
        if boot_port_num == "None":
            boot_port_num = None
        else:
            boot_port_num = int(boot_port_num)

        inputs = list()
        inputs.append({'type': "MemoryPartitionableGraph",
                       'value': self._partitionable_graph})
        inputs.append({'type': 'ReportFolder',
                       'value': self._report_default_directory})
        inputs.append({'type': "ApplicationDataFolder",
                       'value': self._app_data_runtime_folder})
        inputs.append({'type': 'IPAddress', 'value': self._hostname})

        # basic input stuff
        inputs.append({'type': "BMPDetails",
                       'value': config.get("Machine", "bmp_names")})
        inputs.append({'type': "DownedChipsDetails",
                       'value': config.get("Machine", "down_chips")})
        inputs.append({'type': "DownedCoresDetails",
                       'value': config.get("Machine", "down_cores")})
        inputs.append({'type': "BoardVersion",
                       'value': config.getint("Machine", "version")})
        inputs.append({'type': "NumberOfBoards", 'value': number_of_boards})
        inputs.append({'type': "MachineWidth", 'value': width})
        inputs.append({'type': "MachineHeight", 'value': height})
        inputs.append({'type': "AutoDetectBMPFlag",
                       'value': config.getboolean("Machine",
                                                  "auto_detect_bmp")})
        inputs.append({'type': "EnableReinjectionFlag",
                       'value': config.getboolean("Machine",
                                                  "enable_reinjection")})
        inputs.append({'type': "ScampConnectionData",
                       'value': scamp_socket_addresses})
        inputs.append({'type': "BootPortNum", 'value': boot_port_num})
        inputs.append({'type': "APPID", 'value': self._app_id})
        inputs.append({'type': "RunTime", 'value': self._runtime})
        inputs.append({'type': "TimeScaleFactor",
                       'value': self._time_scale_factor})
        inputs.append({'type': "MachineTimeStep",
                       'value': self._machine_time_step})
        inputs.append({'type': "DatabaseSocketAddresses",
                       'value': self._database_socket_addresses})
        inputs.append({'type': "DatabaseWaitOnConfirmationFlag",
                       'value': config.getboolean("Database",
                                                  "wait_on_confirmation")})
        inputs.append({'type': "WriteCheckerFlag",
                       'value': config.getboolean("Mode", "verify_writes")})
        inputs.append({'type': "WriteTextSpecsFlag",
                       'value': config.getboolean("Reports",
                                                  "writeTextSpecs")})
        inputs.append({'type': "ExecutableFinder", 'value': executable_finder})
        inputs.append({'type': "MachineHasWrapAroundsFlag",
                       'value': config.getboolean("Machine",
                                                  "requires_wrap_arounds")})
        inputs.append({'type': "ReportStates", 'value': self._reports_states})
        inputs.append({'type': "UserCreateDatabaseFlag",
                       'value': config.get("Database", "create_database")})
        inputs.append({'type': "ExecuteMapping",
                       'value': config.getboolean(
                           "Database",
                           "create_routing_info_to_neuron_id_mapping")})
        inputs.append({'type': "DatabaseSocketAddresses",
                       'value': self._database_socket_addresses})
        inputs.append({'type': "SendStartNotifications",
                       'value': config.getboolean("Database",
                                                  "send_start_notification")})
        inputs.append({'type': "ProvenanceFilePath",
                       'value': provenance_file_path})

        # add paths for each file based version
        inputs.append({'type': "FileCoreAllocationsFilePath",
                       'value': os.path.join(
                           json_folder, "core_allocations.json")})
        inputs.append({'type': "FileSDRAMAllocationsFilePath",
                       'value': os.path.join(
                           json_folder, "sdram_allocations.json")})
        inputs.append({'type': "FileMachineFilePath",
                       'value': os.path.join(
                           json_folder, "machine.json")})
        inputs.append({'type': "FilePartitionedGraphFilePath",
                       'value': os.path.join(
                           json_folder, "partitioned_graph.json")})
        inputs.append({'type': "FilePlacementFilePath",
                       'value': os.path.join(
                           json_folder, "placements.json")})
        inputs.append({'type': "FileRouingPathsFilePath",
                       'value': os.path.join(
                           json_folder, "routing_paths.json")})
        inputs.append({'type': "FileConstraintsFilePath",
                       'value': os.path.join(
                           json_folder, "constraints.json")})
        return inputs
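
Every executor input is a {'type': ..., 'value': ...} record. A hypothetical helper that builds such records from keyword arguments would shorten the method considerably; the real code appends each record explicitly:

def pacman_inputs(**named):
    # hypothetical convenience wrapper around the record format above
    return [{'type': key, 'value': value} for key, value in named.items()]

inputs = pacman_inputs(APPID=30, RunTime=1000, MachineWidth=None)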
Code example #23
File: spinnaker.py Project: ominux/sPyNNaker
    def run(self, run_time):
        """

        :param run_time:
        :return:
        """
        # sort out config param to be valid types
        width = config.get("Machine", "width")
        height = config.get("Machine", "height")
        if width == "None":
            width = None
        else:
            width = int(width)
        if height == "None":
            height = None
        else:
            height = int(height)

        number_of_boards = config.get("Machine", "number_of_boards")
        if number_of_boards == "None":
            number_of_boards = None

        self.setup_interfaces(
            hostname=self._hostname,
            bmp_details=config.get("Machine", "bmp_names"),
            downed_chips=config.get("Machine", "down_chips"),
            downed_cores=config.get("Machine", "down_cores"),
            board_version=config.getint("Machine", "version"),
            number_of_boards=number_of_boards, width=width, height=height,
            is_virtual=config.getboolean("Machine", "virtual_board"),
            virtual_has_wrap_arounds=config.getboolean(
                "Machine", "requires_wrap_arounds"),
            auto_detect_bmp=config.getboolean("Machine", "auto_detect_bmp"))

        # add extra values needed by the reload script which cannot be given
        # directly
        if self._reports_states.transciever_report:
            self._reload_script.runtime = run_time
            self._reload_script.time_scale_factor = self._time_scale_factor

        # create network report if needed
        if self._reports_states is not None:
            reports.network_specification_partitionable_report(
                self._report_default_directory, self._partitionable_graph,
                self._hostname)

        # calculate number of machine time steps
        if run_time is not None:
            self._no_machine_time_steps =\
                int((run_time * 1000.0) / self._machine_time_step)
            ceiled_machine_time_steps = \
                math.ceil((run_time * 1000.0) / self._machine_time_step)
            if self._no_machine_time_steps != ceiled_machine_time_steps:
                raise common_exceptions.ConfigurationException(
                    "The runtime and machine time step combination result in "
                    "a factional number of machine runable time steps and "
                    "therefore spinnaker cannot determine how many to run for")
            for vertex in self._partitionable_graph.vertices:
                if isinstance(vertex, AbstractDataSpecableVertex):
                    vertex.set_no_machine_time_steps(
                        self._no_machine_time_steps)
        else:
            self._no_machine_time_steps = None
            logger.warn("You have set a runtime that will never end, this may"
                        "cause the neural models to fail to partition "
                        "correctly")
            for vertex in self._partitionable_graph.vertices:
                if (isinstance(vertex, AbstractPopulationRecordableVertex) and
                        vertex.record):
                    raise common_exceptions.ConfigurationException(
                        "recording a population when set to infinite runtime "
                        "is not currently supportable in this tool chain."
                        "watch this space")

        do_timing = config.getboolean("Reports", "outputTimesForSections")
        if do_timing:
            timer = Timer()
        else:
            timer = None

        self.set_runtime(run_time)
        logger.info("*** Running Mapper *** ")
        if do_timing:
            timer.start_timing()
        self.map_model()
        if do_timing:
            timer.take_sample()

        # add database generation if requested
        needs_database = self._auto_detect_database(self._partitioned_graph)
        user_create_database = config.get("Database", "create_database")
        if ((user_create_database == "None" and needs_database) or
                user_create_database == "True"):

            wait_on_confirmation = config.getboolean(
                "Database", "wait_on_confirmation")
            self._database_interface = SpynnakerDataBaseInterface(
                self._app_data_runtime_folder, wait_on_confirmation,
                self._database_socket_addresses)

            self._database_interface.add_system_params(
                self._time_scale_factor, self._machine_time_step,
                self._runtime)
            self._database_interface.add_machine_objects(self._machine)
            self._database_interface.add_partitionable_vertices(
                self._partitionable_graph)
            self._database_interface.add_partitioned_vertices(
                self._partitioned_graph, self._graph_mapper,
                self._partitionable_graph)
            self._database_interface.add_placements(self._placements,
                                                    self._partitioned_graph)
            self._database_interface.add_routing_infos(
                self._routing_infos, self._partitioned_graph)
            self._database_interface.add_routing_tables(self._router_tables)
            self._database_interface.add_tags(self._partitioned_graph,
                                              self._tags)
            execute_mapping = config.getboolean(
                "Database", "create_routing_info_to_neuron_id_mapping")
            if execute_mapping:
                self._database_interface.create_neuron_to_key_mapping(
                    graph_mapper=self._graph_mapper,
                    partitionable_graph=self._partitionable_graph,
                    partitioned_graph=self._partitioned_graph,
                    routing_infos=self._routing_infos)
            # if using a reload script, add if that needs to wait for
            # confirmation
            if self._reports_states.transciever_report:
                self._reload_script.wait_on_confirmation = wait_on_confirmation
                for socket_address in self._database_socket_addresses:
                    self._reload_script.add_socket_address(socket_address)
            self._database_interface.send_read_notification()

        # execute data spec generation
        if do_timing:
            timer.start_timing()
        logger.info("*** Generating Output *** ")
        logger.debug("")
        executable_targets = self.generate_data_specifications()
        if do_timing:
            timer.take_sample()

        # execute data spec execution
        if do_timing:
            timer.start_timing()
        processor_to_app_data_base_address = \
            self.execute_data_specification_execution(
                config.getboolean("SpecExecution", "specExecOnHost"),
                self._hostname, self._placements, self._graph_mapper,
                write_text_specs=config.getboolean(
                    "Reports", "writeTextSpecs"),
                runtime_application_data_folder=self._app_data_runtime_folder,
                machine=self._machine)

        if self._reports_states is not None:
            reports.write_memory_map_report(self._report_default_directory,
                                            processor_to_app_data_base_address)

        if do_timing:
            timer.take_sample()

        if (not isinstance(self._machine, VirtualMachine) and
                config.getboolean("Execute", "run_simulation")):
            if do_timing:
                timer.start_timing()

            logger.info("*** Loading tags ***")
            self.load_tags(self._tags)

            if self._do_load is True:
                logger.info("*** Loading data ***")
                self._load_application_data(
                    self._placements, self._graph_mapper,
                    processor_to_app_data_base_address, self._hostname,
                    app_data_folder=self._app_data_runtime_folder,
                    verify=config.getboolean("Mode", "verify_writes"))
                self.load_routing_tables(self._router_tables, self._app_id)
                logger.info("*** Loading executables ***")
                self.load_executable_images(executable_targets, self._app_id)
                logger.info("*** Loading buffers ***")
                self.set_up_send_buffering(self._partitioned_graph,
                                           self._placements, self._tags)

            # end of entire loading setup
            if do_timing:
                timer.take_sample()

            if self._do_run is True:
                logger.info("*** Running simulation... *** ")
                if do_timing:
                    timer.start_timing()
                # everything is in SYNC0; load the initial buffers
                self._send_buffer_manager.load_initial_buffers()
                if do_timing:
                    timer.take_sample()

                wait_on_confirmation = config.getboolean(
                    "Database", "wait_on_confirmation")
                send_start_notification = config.getboolean(
                    "Database", "send_start_notification")

                self.wait_for_cores_to_be_ready(executable_targets,
                                                self._app_id)

                # wait till external app is ready for us to start if required
                if (self._database_interface is not None and
                        wait_on_confirmation):
                    self._database_interface.wait_for_confirmation()

                self.start_all_cores(executable_targets, self._app_id)

                if (self._database_interface is not None and
                        send_start_notification):
                    self._database_interface.send_start_notification()

                if self._runtime is None:
                    logger.info("Application is set to run forever - exiting")
                else:
                    self.wait_for_execution_to_complete(
                        executable_targets, self._app_id, self._runtime,
                        self._time_scale_factor)
                self._has_ran = True
                if self._retrieve_provance_data:

                    progress = ProgressBar(self._placements.n_placements + 1,
                                           "getting provenance data")

                    # retrieve provenance data from the central machine
                    file_path = os.path.join(self._report_default_directory,
                                             "provance_data")

                    # create the directory if it doesn't already exist
                    if not os.path.exists(file_path):
                        os.mkdir(file_path)

                    # write the provenance data
                    self.write_provenance_data_in_xml(file_path, self._txrx)
                    progress.update()

                    # retrieve provenance data from any cores that provide data
                    for placement in self._placements.placements:
                        if isinstance(placement.subvertex,
                                      AbstractProvidesProvenanceData):
                            core_file_path = os.path.join(
                                file_path,
                                "Provanence_data_for_{}_{}_{}_{}.xml".format(
                                    placement.subvertex.label,
                                    placement.x, placement.y, placement.p))
                            placement.subvertex.write_provenance_data_in_xml(
                                core_file_path, self.transceiver, placement)
                        progress.update()
                    progress.end()

        elif isinstance(self._machine, VirtualMachine):
            logger.info(
                "*** Using a Virtual Machine so no simulation will occur")
        else:
            logger.info("*** No simulation requested: Stopping. ***")
Code example #24
File: spinnaker.py Project: Paul92/sPyNNaker
    def run(self, run_time):
        """

        :param run_time:
        :return:
        """
        self._setup_interfaces(
            hostname=self._hostname,
            virtual_x_dimension=config.getint("Machine",
                                              "virtual_board_x_dimension"),
            virtual_y_dimension=config.getint("Machine",
                                              "virtual_board_y_dimension"),
            downed_chips=config.get("Machine", "down_chips"),
            downed_cores=config.get("Machine", "down_cores"),
            requires_virtual_board=config.getboolean("Machine",
                                                     "virtual_board"),
            requires_wrap_around=config.getboolean("Machine",
                                                   "requires_wrap_arounds"),
            machine_version=config.getint("Machine", "version"))

        # add database generation if requested
        if self._create_database:
            wait_on_confirmation = \
                config.getboolean("Database", "wait_on_confirmation")
            self._database_interface = DataBaseInterface(
                self._app_data_runtime_folder, wait_on_confirmation,
                self._database_socket_addresses)

        # create network report if needed
        if self._reports_states is not None:
            reports.network_specification_partitionable_report(
                self._report_default_directory, self._partitionable_graph,
                self._hostname)

        # calculate number of machine time steps
        if run_time is not None:
            self._no_machine_time_steps =\
                int((run_time * 1000.0) / self._machine_time_step)
            ceiled_machine_time_steps = \
                math.ceil((run_time * 1000.0) / self._machine_time_step)
            if self._no_machine_time_steps != ceiled_machine_time_steps:
                raise common_exceptions.ConfigurationException(
                    "The runtime and machine time step combination result in "
                    "a factional number of machine runable time steps and "
                    "therefore spinnaker cannot determine how many to run for")
            for vertex in self._partitionable_graph.vertices:
                if isinstance(vertex, AbstractDataSpecableVertex):
                    vertex.set_no_machine_time_steps(
                        self._no_machine_time_steps)
        else:
            self._no_machine_time_steps = None
            logger.warn("You have set a runtime that will never end, this may"
                        "cause the neural models to fail to partition "
                        "correctly")
            for vertex in self._partitionable_graph.vertices:
                if vertex.is_set_to_record_spikes():
                    raise common_exceptions.ConfigurationException(
                        "recording a population when set to infinite runtime "
                        "is not currently supportable in this tool chain."
                        "watch this space")

        do_timing = config.getboolean("Reports", "outputTimesForSections")
        if do_timing:
            timer = Timer()
        else:
            timer = None

        self.set_runtime(run_time)
        logger.info("*** Running Mapper *** ")
        if do_timing:
            timer.start_timing()
        self.map_model()
        if do_timing:
            timer.take_sample()

        # load database if needed
        if self._create_database:
            self._database_interface.add_system_params(
                self._time_scale_factor, self._machine_time_step,
                self._runtime)
            self._database_interface.add_machine_objects(self._machine)
            self._database_interface.add_partitionable_vertices(
                self._partitionable_graph)
            self._database_interface.add_partitioned_vertices(
                self._partitioned_graph, self._graph_mapper,
                self._partitionable_graph)
            self._database_interface.add_placements(self._placements,
                                                    self._partitioned_graph)
            self._database_interface.add_routing_infos(
                self._routing_infos, self._partitioned_graph)
            self._database_interface.add_routing_tables(self._router_tables)
            self._database_interface.add_tags(self._partitioned_graph,
                                              self._tags)
            execute_mapping = config.getboolean(
                "Database", "create_routing_info_to_neuron_id_mapping")
            if execute_mapping:
                self._database_interface.create_neuron_to_key_mapping(
                    graph_mapper=self._graph_mapper,
                    partitionable_graph=self._partitionable_graph,
                    partitioned_graph=self._partitioned_graph,
                    routing_infos=self._routing_infos)
            self._database_interface.send_read_notification()

        # execute data spec generation
        if do_timing:
            timer.start_timing()
        logger.info("*** Generating Output *** ")
        logger.debug("")
        executable_targets = self.generate_data_specifications()
        if do_timing:
            timer.take_sample()

        # execute data spec execution
        if do_timing:
            timer.start_timing()
        processor_to_app_data_base_address = \
            self.execute_data_specification_execution(
                config.getboolean("SpecExecution", "specExecOnHost"),
                self._hostname, self._placements, self._graph_mapper,
                write_text_specs=config.getboolean(
                    "Reports", "writeTextSpecs"),
                runtime_application_data_folder=self._app_data_runtime_folder)

        if self._reports_states is not None:
            reports.write_memory_map_report(self._report_default_directory,
                                            processor_to_app_data_base_address)

        if do_timing:
            timer.take_sample()

        if (not isinstance(self._machine, VirtualMachine) and
                config.getboolean("Execute", "run_simulation")):
            if do_timing:
                timer.start_timing()

            logger.info("*** Loading tags ***")
            self._load_tags(self._tags)

            if self._do_load is True:
                logger.info("*** Loading data ***")
                self._load_application_data(
                    self._placements, self._router_tables, self._graph_mapper,
                    processor_to_app_data_base_address, self._hostname,
                    self._app_id,
                    machine_version=config.getint("Machine", "version"),
                    app_data_folder=self._app_data_runtime_folder)
                logger.info("*** Loading executables ***")
                self._load_executable_images(
                    executable_targets, self._app_id,
                    app_data_folder=self._app_data_runtime_folder)
                logger.info("*** Loading buffers ***")
                self._set_up_send_buffering()

            # end of entire loading setup
            if do_timing:
                timer.take_sample()

            if self._do_run is True:
                logger.info("*** Running simulation... *** ")
                if self._reports_states.transciever_report:
                    reports.re_load_script_running_aspects(
                        self._app_data_runtime_folder, executable_targets,
                        self._hostname, self._app_id, run_time)

                # everything is in SYNC0; load the initial buffers
                self._send_buffer_manager.load_initial_buffers()

                wait_on_confirmation = config.getboolean(
                    "Database", "wait_on_confirmation")
                send_start_notification = config.getboolean(
                    "Database", "send_start_notification")

                self._wait_for_cores_to_be_ready(executable_targets,
                                                 self._app_id)

                # wait till external app is ready for us to start if required
                if (self._database_interface is not None and
                        wait_on_confirmation):
                    logger.info(
                        "*** Awaiting for a response from an external source "
                        "to state its ready for the simulation to start ***")
                    self._database_interface.wait_for_confirmation()

                self._start_all_cores(executable_targets, self._app_id)

                if (self._database_interface is not None and
                        send_start_notification):
                    self._database_interface.send_start_notification()

                if self._runtime is None:
                    logger.info("Application is set to run forever - exiting")
                else:
                    self._wait_for_execution_to_complete(
                        executable_targets, self._app_id, self._runtime,
                        self._time_scale_factor)
                self._has_ran = True
                if self._retrieve_provance_data:

                    # retrieve provenance data from the central machine
                    file_path = os.path.join(self._report_default_directory,
                                             "provance_data")

                    # create the directory if it doesn't already exist
                    if not os.path.exists(file_path):
                        os.mkdir(file_path)

                    self._write_provanence_data_in_xml(file_path)

                    # retrieve provenance data from any cores that provide data
                    for placement in self._placements:
                        if isinstance(placement.subvertex,
                                      AbstractProvidesProvanenceData):
                            file_path = os.path.join(
                                self._report_default_directory,
                                "Provanence_data_for_core:{}:{}:{}"
                                .format(placement.x, placement.y, placement.p))

        elif isinstance(self._machine, VirtualMachine):
            logger.info(
                "*** Using a Virtual Machine so no simulation will occur")
        else:
            logger.info("*** No simulation requested: Stopping. ***")
Code example #25
File: spinnaker.py Project: jackokaiser/sPyNNaker
    def run(self, run_time):
        """

        :param run_time:
        :return:
        """
        # sort out config param to be valid types
        width = config.get("Machine", "width")
        height = config.get("Machine", "height")
        if width == "None":
            width = None
        else:
            width = int(width)
        if height == "None":
            height = None
        else:
            height = int(height)

        number_of_boards = config.get("Machine", "number_of_boards")
        if number_of_boards == "None":
            number_of_boards = None

        self.setup_interfaces(
            hostname=self._hostname,
            bmp_details=config.get("Machine", "bmp_names"),
            downed_chips=config.get("Machine", "down_chips"),
            downed_cores=config.get("Machine", "down_cores"),
            board_version=config.getint("Machine", "version"),
            number_of_boards=number_of_boards,
            width=width,
            height=height,
            is_virtual=config.getboolean("Machine", "virtual_board"),
            virtual_has_wrap_arounds=config.getboolean(
                "Machine", "requires_wrap_arounds"),
            auto_detect_bmp=config.getboolean("Machine", "auto_detect_bmp"))

        # add extra values needed by the reload script which cannot be given
        # directly
        if self._reports_states.transciever_report:
            self._reload_script.runtime = run_time
            self._reload_script.time_scale_factor = self._time_scale_factor

        # create network report if needed
        if self._reports_states is not None:
            reports.network_specification_partitionable_report(
                self._report_default_directory, self._partitionable_graph,
                self._hostname)

        # calculate number of machine time steps
        if run_time is not None:
            self._no_machine_time_steps =\
                int((run_time * 1000.0) / self._machine_time_step)
            ceiled_machine_time_steps = \
                math.ceil((run_time * 1000.0) / self._machine_time_step)
            if self._no_machine_time_steps != ceiled_machine_time_steps:
                raise common_exceptions.ConfigurationException(
                    "The runtime and machine time step combination result in "
                    "a factional number of machine runable time steps and "
                    "therefore spinnaker cannot determine how many to run for")
            for vertex in self._partitionable_graph.vertices:
                if isinstance(vertex, AbstractDataSpecableVertex):
                    vertex.set_no_machine_time_steps(
                        self._no_machine_time_steps)
        else:
            self._no_machine_time_steps = None
            logger.warn("You have set a runtime that will never end, this may"
                        "cause the neural models to fail to partition "
                        "correctly")
            for vertex in self._partitionable_graph.vertices:
                if (isinstance(vertex, AbstractPopulationRecordableVertex)
                        and vertex.record):
                    raise common_exceptions.ConfigurationException(
                        "recording a population when set to infinite runtime "
                        "is not currently supportable in this tool chain."
                        "watch this space")

        do_timing = config.getboolean("Reports", "outputTimesForSections")
        if do_timing:
            timer = Timer()
        else:
            timer = None

        self.set_runtime(run_time)
        logger.info("*** Running Mapper *** ")
        if do_timing:
            timer.start_timing()
        self.map_model()
        if do_timing:
            timer.take_sample()

        # add database generation if requested
        needs_database = self._auto_detect_database(self._partitioned_graph)
        user_create_database = config.get("Database", "create_database")
        if ((user_create_database == "None" and needs_database)
                or user_create_database == "True"):

            wait_on_confirmation = config.getboolean("Database",
                                                     "wait_on_confirmation")
            self._database_interface = SpynnakerDataBaseInterface(
                self._app_data_runtime_folder, wait_on_confirmation,
                self._database_socket_addresses)

            self._database_interface.add_system_params(self._time_scale_factor,
                                                       self._machine_time_step,
                                                       self._runtime)
            self._database_interface.add_machine_objects(self._machine)
            self._database_interface.add_partitionable_vertices(
                self._partitionable_graph)
            self._database_interface.add_partitioned_vertices(
                self._partitioned_graph, self._graph_mapper,
                self._partitionable_graph)
            self._database_interface.add_placements(self._placements,
                                                    self._partitioned_graph)
            self._database_interface.add_routing_infos(self._routing_infos,
                                                       self._partitioned_graph)
            self._database_interface.add_routing_tables(self._router_tables)
            self._database_interface.add_tags(self._partitioned_graph,
                                              self._tags)
            execute_mapping = config.getboolean(
                "Database", "create_routing_info_to_neuron_id_mapping")
            if execute_mapping:
                self._database_interface.create_neuron_to_key_mapping(
                    graph_mapper=self._graph_mapper,
                    partitionable_graph=self._partitionable_graph,
                    partitioned_graph=self._partitioned_graph,
                    routing_infos=self._routing_infos)
            # if using a reload script, add if that needs to wait for
            # confirmation
            if self._reports_states.transciever_report:
                self._reload_script.wait_on_confirmation = wait_on_confirmation
                for socket_address in self._database_socket_addresses:
                    self._reload_script.add_socket_address(socket_address)
            self._database_interface.send_read_notification()

        # execute data spec generation
        if do_timing:
            timer.start_timing()
        logger.info("*** Generating Output *** ")
        logger.debug("")
        executable_targets = self.generate_data_specifications()
        if do_timing:
            timer.take_sample()

        # execute data spec execution
        if do_timing:
            timer.start_timing()
        processor_to_app_data_base_address = \
            self.execute_data_specification_execution(
                config.getboolean("SpecExecution", "specExecOnHost"),
                self._hostname, self._placements, self._graph_mapper,
                write_text_specs=config.getboolean(
                    "Reports", "writeTextSpecs"),
                runtime_application_data_folder=self._app_data_runtime_folder,
                machine=self._machine)

        if self._reports_states is not None:
            reports.write_memory_map_report(
                self._report_default_directory,
                processor_to_app_data_base_address)

        if do_timing:
            timer.take_sample()

        if (not isinstance(self._machine, VirtualMachine)
                and config.getboolean("Execute", "run_simulation")):
            if do_timing:
                timer.start_timing()

            logger.info("*** Loading tags ***")
            self.load_tags(self._tags)

            if self._do_load is True:
                logger.info("*** Loading data ***")
                self._load_application_data(
                    self._placements,
                    self._graph_mapper,
                    processor_to_app_data_base_address,
                    self._hostname,
                    app_data_folder=self._app_data_runtime_folder,
                    verify=config.getboolean("Mode", "verify_writes"))
                self.load_routing_tables(self._router_tables, self._app_id)
                logger.info("*** Loading executables ***")
                self.load_executable_images(executable_targets, self._app_id)
                logger.info("*** Loading buffers ***")
                self.set_up_send_buffering(self._partitioned_graph,
                                           self._placements, self._tags)

            # end of entire loading setup
            if do_timing:
                timer.take_sample()

            if self._do_run is True:
                logger.info("*** Running simulation... *** ")
                if do_timing:
                    timer.start_timing()
                # everything is in SYNC0; load the initial buffers
                self._send_buffer_manager.load_initial_buffers()
                if do_timing:
                    timer.take_sample()

                wait_on_confirmation = config.getboolean(
                    "Database", "wait_on_confirmation")
                send_start_notification = config.getboolean(
                    "Database", "send_start_notification")

                self.wait_for_cores_to_be_ready(executable_targets,
                                                self._app_id)

                # wait till external app is ready for us to start if required
                if (self._database_interface is not None
                        and wait_on_confirmation):
                    self._database_interface.wait_for_confirmation()

                self.start_all_cores(executable_targets, self._app_id)

                if (self._database_interface is not None
                        and send_start_notification):
                    self._database_interface.send_start_notification()

                if self._runtime is None:
                    logger.info("Application is set to run forever - exiting")
                else:
                    self.wait_for_execution_to_complete(
                        executable_targets, self._app_id, self._runtime,
                        self._time_scale_factor)
                self._has_ran = True
                if self._retrieve_provance_data:

                    progress = ProgressBar(self._placements.n_placements + 1,
                                           "getting provenance data")

                    # retrieve provenance data from the central machine
                    file_path = os.path.join(self._report_default_directory,
                                             "provance_data")

                    # create the directory if it doesn't already exist
                    if not os.path.exists(file_path):
                        os.mkdir(file_path)

                    # write the provenance data
                    self.write_provenance_data_in_xml(file_path, self._txrx)
                    progress.update()

                    # retrieve provenance data from any cores that provide data
                    for placement in self._placements.placements:
                        if isinstance(placement.subvertex,
                                      AbstractProvidesProvenanceData):
                            core_file_path = os.path.join(
                                file_path,
                                "Provanence_data_for_{}_{}_{}_{}.xml".format(
                                    placement.subvertex.label, placement.x,
                                    placement.y, placement.p))
                            placement.subvertex.write_provenance_data_in_xml(
                                core_file_path, self.transceiver, placement)
                        progress.update()
                    progress.end()

        elif isinstance(self._machine, VirtualMachine):
            logger.info(
                "*** Using a Virtual Machine so no simulation will occur")
        else:
            logger.info("*** No simulation requested: Stopping. ***")
Code example #26
File: spinnaker.py Project: Paul92/sPyNNaker
    def generate_data_specifications(self):
        """ generates the dsg for the graph.

        :return:
        """

        # iterate through subvertices and call generate_data_spec for each
        # vertex
        executable_targets = dict()
        no_processors = config.getint("Threading", "dsg_threads")
        thread_pool = ThreadPool(processes=no_processors)

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(self._placements.placements)),
                                   "on generating data specifications")
        data_generator_interfaces = list()
        for placement in self._placements.placements:
            associated_vertex =\
                self._graph_mapper.get_vertex_from_subvertex(
                    placement.subvertex)

            # if the vertex can generate a DSG, call it
            if isinstance(associated_vertex, AbstractDataSpecableVertex):

                ip_tags = self._tags.get_ip_tags_for_vertex(
                    placement.subvertex)
                reverse_ip_tags = self._tags.get_reverse_ip_tags_for_vertex(
                    placement.subvertex)
                data_generator_interface = DataGeneratorInterface(
                    associated_vertex, placement.subvertex, placement,
                    self._partitioned_graph, self._partitionable_graph,
                    self._routing_infos, self._hostname, self._graph_mapper,
                    self._report_default_directory, ip_tags, reverse_ip_tags,
                    self._writeTextSpecs, self._app_data_runtime_folder,
                    progress_bar)
                data_generator_interfaces.append(data_generator_interface)
                thread_pool.apply_async(data_generator_interface.start)

                # Get name of binary from vertex
                binary_name = associated_vertex.get_binary_file_name()

                # Attempt to find this within search paths
                binary_path = executable_finder.get_executable_path(
                    binary_name)
                if binary_path is None:
                    raise exceptions.ExecutableNotFoundException(binary_name)

                if binary_path in executable_targets:
                    executable_targets[binary_path].add_processor(placement.x,
                                                                  placement.y,
                                                                  placement.p)
                else:
                    processors = [placement.p]
                    initial_core_subset = CoreSubset(placement.x, placement.y,
                                                     processors)
                    list_of_core_subsets = [initial_core_subset]
                    executable_targets[binary_path] = \
                        CoreSubsets(list_of_core_subsets)

        for data_generator_interface in data_generator_interfaces:
            data_generator_interface.wait_for_finish()
        thread_pool.close()
        thread_pool.join()

        # finish the progress bar
        progress_bar.end()

        return executable_targets
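
The executable_targets bookkeeping above boils down to one pattern: a mapping from binary path to the set of cores that should run it, created on first sight of a binary and extended afterwards. A dependency-free sketch of the same idea (plain sets stand in for the CoreSubset/CoreSubsets classes, and the binary name is illustrative):

executable_targets = {}

def add_target(binary_path, x, y, p):
    # create the core set the first time a binary is seen, then extend it
    executable_targets.setdefault(binary_path, set()).add((x, y, p))

add_target("spike_source_poisson.aplx", 0, 0, 1)
add_target("spike_source_poisson.aplx", 0, 0, 2)
assert executable_targets["spike_source_poisson.aplx"] == {(0, 0, 1),
                                                           (0, 0, 2)}
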
Code example #27
File: spinnaker.py  Project: lmateev/sPyNNaker
    def _set_up_timings(self, timestep, min_delay, max_delay):
        self._machine_time_step = config.getint("Machine", "machineTimeStep")

        # deal with params allowed via the setup options
        if timestep is not None:

            # convert from milliseconds to microseconds
            timestep *= 1000
            self._machine_time_step = timestep
        else:
            # fall back to the configured machine time step so the delay
            # checks below always have a concrete value to work with
            timestep = self._machine_time_step

        if min_delay is not None and float(min_delay * 1000) < 1.0 * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support min delays below {} ms with the "
                "current machine time step".format(
                    constants.MIN_SUPPORTED_DELAY * timestep / 1000.0))

        natively_supported_delay_for_models = \
            constants.MAX_SUPPORTED_DELAY_TICS
        delay_extension_max_supported_delay = \
            constants.MAX_DELAY_BLOCKS \
            * constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK

        max_delay_tics_supported = \
            natively_supported_delay_for_models + \
            delay_extension_max_supported_delay

        if max_delay is not None\
           and float(max_delay * 1000) > max_delay_tics_supported * timestep:
            raise common_exceptions.ConfigurationException(
                "Pacman does not support max delays above {} ms with the "
                "current machine time step".format(
                    max_delay_tics_supported * timestep / 1000.0))
        if min_delay is not None:
            self._min_supported_delay = min_delay
        else:
            self._min_supported_delay = timestep / 1000.0

        if max_delay is not None:
            self._max_supported_delay = max_delay
        else:
            self._max_supported_delay = (max_delay_tics_supported *
                                         (timestep / 1000.0))

        if (config.has_option("Machine", "timeScaleFactor")
                and config.get("Machine", "timeScaleFactor") != "None"):
            self._time_scale_factor = \
                config.getint("Machine", "timeScaleFactor")
            if timestep * self._time_scale_factor < 1000:
                if config.getboolean("Mode",
                                     "violate_1ms_wall_clock_restriction"):
                    logger.warn(
                        "****************************************************")
                    logger.warn(
                        "*** The combination of simulation time step and  ***")
                    logger.warn(
                        "*** the machine time scale factor results in a   ***")
                    logger.warn(
                        "*** wall clock timer tick that is currently not  ***")
                    logger.warn(
                        "*** reliably supported by the SpiNNaker machine. ***")
                    logger.warn(
                        "****************************************************")
                else:
                    raise common_exceptions.ConfigurationException(
                        "The combination of simulation time step and the"
                        " machine time scale factor results in a wall clock "
                        "timer tick that is currently not reliably supported "
                        "by the spinnaker machine.  If you would like to "
                        "override this behaviour (at your own risk), please "
                        "add violate_1ms_wall_clock_restriction = True to the "
                        "[Mode] section of your .spynnaker.cfg file")
        else:
            self._time_scale_factor = max(1,
                                          math.ceil(1000.0 / float(timestep)))
            if self._time_scale_factor > 1:
                logger.warn(
                    "A timestep was entered that has forced sPyNNaker "
                    "to automatically slow the simulation down from "
                    "real time by a factor of {}. To remove this "
                    "automatic behaviour, please enter a "
                    "timescaleFactor value in your .spynnaker.cfg".format(
                        self._time_scale_factor))
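
The fallback branch above picks the smallest integer slow-down that keeps one timer tick at or above 1 ms of wall clock time. A self-contained check of that arithmetic for a 0.1 ms PyNN timestep (values illustrative):

import math

timestep_us = 0.1 * 1000  # 0.1 ms PyNN timestep, in microseconds
time_scale_factor = max(1, math.ceil(1000.0 / float(timestep_us)))
assert time_scale_factor == 10
# each timer tick now spans at least 1 ms of wall clock time
assert timestep_us * time_scale_factor >= 1000
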
Code example #28
    def _set_up_machine_specifics(self, timestep, min_delay, max_delay,
                                  hostname):
        self._machine_time_step = config.getint("Machine", "machineTimeStep")

        # deal with params allowed via the setup options
        if timestep is not None:
            timestep *= 1000  # convert from milliseconds to microseconds
            config.set("Machine", "machineTimeStep", timestep)
            self._machine_time_step = timestep
        else:
            # fall back to the configured machine time step so the delay
            # checks below always have a concrete value to work with
            timestep = self._machine_time_step

        if min_delay is not None and float(min_delay * 1000) < 1.0 * timestep:
            raise exceptions.ConfigurationException(
                "Pacman does not support min delays below {} ms with the "
                "current machine time step".format(1.0 * timestep / 1000.0))

        natively_supported_delay_for_models = \
            constants.MAX_SUPPORTED_DELAY_TICS
        delay_extension_max_supported_delay = \
            constants.MAX_DELAY_BLOCKS \
            * constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK

        max_delay_tics_supported = \
            natively_supported_delay_for_models + \
            delay_extension_max_supported_delay

        if max_delay is not None\
           and float(max_delay * 1000) > max_delay_tics_supported * timestep:
            raise exceptions.ConfigurationException(
                "Pacman does not support max delays above {} ms with the "
                "current machine time step".format(
                    max_delay_tics_supported * timestep / 1000.0))
        if min_delay is not None:
            if not config.has_section("Model"):
                config.add_section("Model")
            config.set("Model", "min_delay", (min_delay * 1000) / timestep)

        if max_delay is not None:
            if not config.has_section("Model"):
                config.add_section("Model")
            config.set("Model", "max_delay", (max_delay * 1000) / timestep)

        if (config.has_option("Machine", "timeScaleFactor")
                and config.get("Machine", "timeScaleFactor") != "None"):
            self._time_scale_factor = \
                config.getint("Machine", "timeScaleFactor")
            if timestep * self._time_scale_factor < 1000:
                logger.warn("the combination of machine time step and the "
                            "machine time scale factor results in a real "
                            "timer tick that is currently not reliably "
                            "supported by the spinnaker machine.")
        else:
            self._time_scale_factor = max(1,
                                          math.ceil(1000.0 / float(timestep)))
            if self._time_scale_factor > 1:
                logger.warn("A timestep was entered that has forced pacman103 "
                            "to automatically slow the simulation down from "
                            "real time by a factor of {}. To remove this "
                            "automatic behaviour, please enter a "
                            "timescaleFactor value in your .pacman.cfg".format(
                                self._time_scale_factor))

        if hostname is not None:
            self._hostname = hostname
            logger.warn("The machine name from PYNN setup is overriding the "
                        "machine name defined in the spynnaker.cfg file")
        elif config.has_option("Machine", "machineName"):
            self._hostname = config.get("Machine", "machineName")
        else:
            raise Exception("A SpiNNaker machine must be specified in "
                            "spynnaker.cfg.")
        use_virtual_board = config.getboolean("Machine", "virtual_board")
        if self._hostname == 'None' and not use_virtual_board:
            raise Exception("A SpiNNaker machine must be specified in "
                            "spynnaker.cfg.")