def send_stop_message(self):
    """ Send a message to indicate the end of all the messages
    :return:
    """
    if not self._sent_stop_message:
        self._sent_stop_message = True
        self.add_message_to_send(EventStopRequest())

    def _send_messages(self, size, vertex, region, sequence_no):
        """ Send a set of messages
        """

        # Get the sent messages for the vertex
        if vertex not in self._sent_messages:
            self._sent_messages[vertex] = BuffersSentDeque(region)
        sent_messages = self._sent_messages[vertex]

        # If the sequence number is outside the window, return no messages
        if not sent_messages.update_last_received_sequence_number(sequence_no):
            return list()

        # Remove the existing packets from the size available
        bytes_to_go = size
        for message in sent_messages.messages:
            if isinstance(message.eieio_data_message, EIEIODataMessage):
                bytes_to_go -= message.eieio_data_message.size
            else:
                bytes_to_go -= (message.eieio_data_message
                                .get_min_packet_length())

        # Add messages up to the limits
        while (vertex.is_next_timestamp(region) and
                not sent_messages.is_full and bytes_to_go > 0):

            space_available = min(
                bytes_to_go,
                constants.UDP_MESSAGE_MAX_SIZE -
                HostSendSequencedData.get_min_packet_length())
            # logger.debug(
            #     "Bytes to go {}, space available {}".format(
            #         bytes_to_go, space_available))
            next_message = self._create_message_to_send(
                space_available, vertex, region)
            if next_message is None:
                break
            sent_messages.add_message_to_send(next_message)
            bytes_to_go -= next_message.size
            # logger.debug("Adding additional buffer of {} bytes".format(
            #     next_message.size))

        # If the vertex is empty, send the stop messages if there is space
        if (not sent_messages.is_full and
                not vertex.is_next_timestamp(region) and
                bytes_to_go >= EventStopRequest.get_min_packet_length()):
            sent_messages.send_stop_message()

        # If there are no more messages, turn off requests for more messages
        if not vertex.is_next_timestamp(region) and sent_messages.is_empty():
            # logger.debug("Sending stop")
            self._send_request(vertex, StopRequests())

        # Send the messages
        for message in sent_messages.messages:
            # logger.debug("Sending message with sequence {}".format(
            #     message.sequence_no))
            self._send_request(vertex, message)
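
The loop above keeps a running byte budget: it starts from the size given in the board's request, subtracts every message still held in the BuffersSentDeque window, and then caps each new packet at the UDP payload limit minus the sequenced-data header. The standalone sketch below restates that accounting; UDP_MESSAGE_MAX_SIZE_GUESS and SEQUENCED_HEADER_GUESS are illustrative stand-ins, not the real spinnman constants.

UDP_MESSAGE_MAX_SIZE_GUESS = 280   # stand-in for constants.UDP_MESSAGE_MAX_SIZE
SEQUENCED_HEADER_GUESS = 8         # stand-in for the sequenced-data header size


def remaining_space(window_size, queued_message_sizes):
    """ Bytes still free after accounting for messages already queued but
        not yet acknowledged by the board.
    """
    return window_size - sum(queued_message_sizes)


def next_packet_space(bytes_to_go):
    """ Payload space available in the next host-send-sequenced packet. """
    return min(bytes_to_go,
               UDP_MESSAGE_MAX_SIZE_GUESS - SEQUENCED_HEADER_GUESS)


# e.g. a 1024-byte request with two 300-byte messages still in flight
bytes_to_go = remaining_space(1024, [300, 300])    # 424 bytes left
print(next_packet_space(bytes_to_go))              # 272 with these guesses
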
    def _get_spike_send_buffer(self, vertex_slice):
        """
        The spike_times array is a list with one entry per 'neuron'. The
        entry for one neuron is a list of times (in ms) at which the neuron
        fires. We need to transpose this 'matrix' to get a list of firing
        neuron indices for each time tick.
        The list can come in two formats (both now supported):
        1) Official PyNN format - a single list that is used for all neurons
        2) SpiNNaker format - a list of lists, one per neuron
        """
        send_buffer = None
        key = (vertex_slice.lo_atom, vertex_slice.hi_atom)
        if key not in self._send_buffers:
            send_buffer = BufferedSendingRegion()
            if isinstance(self._spike_times[0], list):

                # This is in SpiNNaker 'list of lists' format:
                for neuron in range(vertex_slice.lo_atom,
                                    vertex_slice.hi_atom + 1):
                    for timeStamp in sorted(self._spike_times[neuron]):
                        time_stamp_in_ticks = int(
                            math.ceil((timeStamp * 1000.0) /
                                      self._machine_time_step))
                        send_buffer.add_key(time_stamp_in_ticks,
                                            neuron - vertex_slice.lo_atom)
            else:

                # This is in official PyNN format, all neurons use the
                # same list:
                neuron_list = range(vertex_slice.n_atoms)
                for timeStamp in sorted(self._spike_times):
                    time_stamp_in_ticks = int(
                        math.ceil((timeStamp * 1000.0) /
                                  self._machine_time_step))

                    # add to send_buffer collection
                    send_buffer.add_keys(time_stamp_in_ticks, neuron_list)

            # Update the size
            total_size = 0
            for timestamp in send_buffer.timestamps:
                n_keys = send_buffer.get_n_keys(timestamp)
                total_size += BufferManager.get_n_bytes(n_keys)
            total_size += EventStopRequest.get_min_packet_length()
            if total_size > self._max_on_chip_memory_usage_for_spikes:
                total_size = self._max_on_chip_memory_usage_for_spikes
            send_buffer.buffer_size = total_size
            self._send_buffers[key] = send_buffer
        else:
            send_buffer = self._send_buffers[key]
        return send_buffer
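
The docstring above describes two jobs: transposing the per-neuron spike times into per-tick neuron indices, and converting millisecond spike times into machine time-step ticks (the time step is in microseconds). The helper below is an illustrative, standalone restatement of that conversion, not part of the sPyNNaker API.

import math


def spikes_per_tick(spike_times, n_neurons, machine_time_step):
    """ Transpose per-neuron spike times (ms) into {tick: [neuron indices]},
        handling both the shared-list and list-of-lists formats.
    """
    ticks = {}
    if spike_times and isinstance(spike_times[0], list):
        per_neuron = spike_times                 # SpiNNaker list-of-lists
    else:
        per_neuron = [spike_times] * n_neurons   # PyNN single shared list
    for neuron, times in enumerate(per_neuron):
        for time_ms in sorted(times):
            tick = int(math.ceil((time_ms * 1000.0) / machine_time_step))
            ticks.setdefault(tick, []).append(neuron)
    return ticks


# With a 1 ms (1000 us) time step, a spike at 2.5 ms lands in tick 3
print(spikes_per_tick([[0.0, 2.5], [1.0]], 2, 1000))
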
    def _calculate_sizes(self):
        """ Deduce how big the buffer and the region need to be
        :return:
        """
        size = 0
        for timestamp in self._timestamps:
            n_keys = self.get_n_keys(timestamp)
            size += self.get_n_bytes(n_keys)
        size += EventStopRequest.get_min_packet_length()
        if size > self._max_size_of_buffer:
            self._buffer_size = self._max_size_of_buffer
        else:
            self._buffer_size = size
        self._total_region_size = size
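
A worked example of the sizing rule above, using made-up byte counts: the region must hold every timestamp's keys plus one EventStopRequest, while the buffer itself is clamped to the configured maximum.

# Illustrative numbers only; the real values come from get_n_bytes() and
# EventStopRequest.get_min_packet_length()
per_timestamp_bytes = [16, 24, 8]
event_stop_length = 4                      # hypothetical minimum packet length
max_size_of_buffer = 32                    # hypothetical cap

size = sum(per_timestamp_bytes) + event_stop_length    # 52
buffer_size = min(size, max_size_of_buffer)            # clamped to 32
total_region_size = size                                # stays 52
print(buffer_size, total_region_size)
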
    def _send_initial_messages(self, vertex, region, progress_bar):
        """ Send the initial set of messages

        :param vertex: The vertex to get the keys from
        :type vertex:\
                    :py:class:`spynnaker.pyNN.models.abstract_models.buffer_models.abstract_sends_buffers_from_host_partitioned_vertex.AbstractSendsBuffersFromHostPartitionedVertex`
        :param region: The region to get the keys from
        :type region: int
        :rtype: None
        """

        # Get the vertex load details
        region_base_address = self._locate_region_address(region, vertex)
        placement = self._placements.get_placement_of_subvertex(vertex)

        # Add packets until out of space
        sent_message = False
        bytes_to_go = vertex.get_region_buffer_size(region)
        if bytes_to_go % 2 != 0:
            raise exceptions.SpinnFrontEndException(
                "The buffer region of {} must be divisible by 2".format(
                    vertex))
        all_data = ""
        if vertex.is_empty(region):
            sent_message = True
        else:
            while (vertex.is_next_timestamp(region) and
                    bytes_to_go > (EIEIO32BitTimedPayloadPrefixDataMessage
                                   .get_min_packet_length())):
                space_available = min(bytes_to_go, 255 * _N_BYTES_PER_KEY)
                next_message = self._create_message_to_send(
                    space_available, vertex, region)
                if next_message is None:
                    break

                # Write the message to the memory
                data = next_message.bytestring
                all_data += data
                sent_message = True

                # Update the positions
                bytes_to_go -= len(data)
                progress_bar.update(len(data))

        if not sent_message:
            raise exceptions.BufferableRegionTooSmall(
                "The buffer size {} is too small for any data to be added for"
                " region {} of vertex {}".format(bytes_to_go, region, vertex))

        # If there are no more messages and there is space, add a stop request
        if (not vertex.is_next_timestamp(region) and
                bytes_to_go >= EventStopRequest.get_min_packet_length()):
            data = EventStopRequest().bytestring
            logger.debug("Writing stop message of {} bytes to {} on"
                         " {}, {}, {}".format(
                             len(data), hex(region_base_address),
                             placement.x, placement.y, placement.p))
            all_data += data
            bytes_to_go -= len(data)
            progress_bar.update(len(data))
            self._sent_messages[vertex] = BuffersSentDeque(
                region, sent_stop_message=True)

        # If there is any space left, add padding
        if bytes_to_go > 0:
            padding_packet = PaddingRequest()
            n_packets = bytes_to_go // padding_packet.get_min_packet_length()
            data = padding_packet.bytestring
            data *= n_packets
            all_data += data

        # Do the writing all at once for efficiency
        self._transceiver.write_memory(
            placement.x, placement.y, region_base_address, all_data)
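
The final padding step fills whatever space is left with whole PaddingRequest packets, which is presumably why the region size is required to be divisible by 2. The sketch below shows just that arithmetic; PADDING_PACKET_LENGTH_GUESS and PADDING_PACKET_BYTES_GUESS are hypothetical stand-ins, and the real length comes from PaddingRequest.get_min_packet_length().

PADDING_PACKET_LENGTH_GUESS = 2            # hypothetical minimum packet length
PADDING_PACKET_BYTES_GUESS = b"\x00\x00"   # stand-in padding bytestring


def pad_region(all_data, bytes_to_go):
    """ Fill any space left in the region with whole padding packets. """
    if bytes_to_go > 0:
        n_packets = bytes_to_go // PADDING_PACKET_LENGTH_GUESS
        all_data += PADDING_PACKET_BYTES_GUESS * n_packets
    return all_data


# e.g. 6 bytes left over become three 2-byte padding packets
print(len(pad_region(b"", 6)))   # 6
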
    def _send_initial_messages(self, vertex, region, progress_bar):
        """ Send the initial set of messages

        :param vertex: The vertex to get the keys from
        :type vertex:\
                    :py:class:`spynnaker.pyNN.models.abstract_models.buffer_models.abstract_sends_buffers_from_host.AbstractSendsBuffersFromHost`
        :param region: The region to get the keys from
        :type region: int
        :rtype: None
        """

        # Get the vertex load details
        # region_base_address = self._locate_region_address(region, vertex)
        region_base_address = \
            helpful_functions.locate_memory_region_for_placement(
                self._placements.get_placement_of_subvertex(vertex), region,
                self._transceiver)
        placement = self._placements.get_placement_of_subvertex(vertex)

        # Add packets until out of space
        sent_message = False
        bytes_to_go = vertex.get_region_buffer_size(region)
        if bytes_to_go % 2 != 0:
            raise exceptions.SpinnFrontEndException(
                "The buffer region of {} must be divisible by 2".format(
                    vertex))
        all_data = ""
        if vertex.is_empty(region):
            sent_message = True
        else:
            min_size_of_packet = \
                EIEIO32BitTimedPayloadPrefixDataMessage.get_min_packet_length()
            while (vertex.is_next_timestamp(region)
                   and bytes_to_go > min_size_of_packet):
                space_available = min(bytes_to_go, 280)
                next_message = self._create_message_to_send(
                    space_available, vertex, region)
                if next_message is None:
                    break

                # Write the message to the memory
                data = next_message.bytestring
                all_data += data
                sent_message = True

                # Update the positions
                bytes_to_go -= len(data)
                progress_bar.update(len(data))

        if not sent_message:
            raise exceptions.BufferableRegionTooSmall(
                "The buffer size {} is too small for any data to be added for"
                " region {} of vertex {}".format(bytes_to_go, region, vertex))

        # If there are no more messages and there is space, add a stop request
        if (not vertex.is_next_timestamp(region)
                and bytes_to_go >= EventStopRequest.get_min_packet_length()):
            data = EventStopRequest().bytestring
            # logger.debug(
            #    "Writing stop message of {} bytes to {} on {}, {}, {}".format(
            #         len(data), hex(region_base_address),
            #         placement.x, placement.y, placement.p))
            all_data += data
            bytes_to_go -= len(data)
            progress_bar.update(len(data))
            self._sent_messages[vertex] = BuffersSentDeque(
                region, sent_stop_message=True)

        # If there is any space left, add padding
        if bytes_to_go > 0:
            padding_packet = PaddingRequest()
            n_packets = bytes_to_go // padding_packet.get_min_packet_length()
            data = padding_packet.bytestring
            data *= n_packets
            all_data += data

        # Do the writing all at once for efficiency
        self._transceiver.write_memory(placement.x, placement.y,
                                       region_base_address, all_data)
def read_eieio_command_message(data, offset):
    """ Reads the content of an EIEIO command message and returns an object\
        identifying the command which was contained in the packet, including\
        any parameter, if required by the command

    :param data: data received from the network
    :type data: bytestring
    :param offset: offset at which the parsing operation should start
    :type offset: int
    :return: an object which inherits from EIEIOCommandMessage which contains\
            parsed data received from the network
    :rtype: \
            :py:class:`spinnman.messages.eieio.command_messages.eieio_command_message.EIEIOCommandMessage`
    """
    command_header = EIEIOCommandHeader.from_bytestring(data, offset)
    command_number = command_header.command

    if (command_number ==
            constants.EIEIO_COMMAND_IDS.DATABASE_CONFIRMATION.value):
        return DatabaseConfirmation.from_bytestring(command_header, data,
                                                    offset + 2)

    # Fill in buffer area with padding
    elif (command_number == constants.EIEIO_COMMAND_IDS.EVENT_PADDING.value):
        return PaddingRequest()

    # End of all buffers, stop execution
    elif (command_number == constants.EIEIO_COMMAND_IDS.EVENT_STOP.value):
        return EventStopRequest()

    # Stop complaining that there is SDRAM free space for buffers
    elif (command_number ==
          constants.EIEIO_COMMAND_IDS.STOP_SENDING_REQUESTS.value):
        return StopRequests()

    # Start complaining that there is SDRAM free space for buffers
    elif (command_number ==
          constants.EIEIO_COMMAND_IDS.START_SENDING_REQUESTS.value):
        return StartRequests()

    # SpiNNaker requesting new buffers for a spike source population
    elif (command_number ==
          constants.EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_BUFFERS.value):
        return SpinnakerRequestBuffers.from_bytestring(command_header, data,
                                                       offset + 2)

    # Buffers being sent from host to SpiNNaker
    elif (command_number ==
          constants.EIEIO_COMMAND_IDS.HOST_SEND_SEQUENCED_DATA.value):
        return HostSendSequencedData.from_bytestring(command_header, data,
                                                     offset + 2)

    # Buffers available to be read from a buffered out vertex
    elif (command_number ==
          constants.EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_READ_DATA.value):
        return SpinnakerRequestReadData.from_bytestring(
            command_header, data, offset + 2)

    # Host confirming data being read from SpiNNaker memory
    elif (command_number == constants.EIEIO_COMMAND_IDS.HOST_DATA_READ.value):
        return HostDataRead.from_bytestring(command_header, data, offset + 2)
    return EIEIOCommandMessage(command_header, data, offset + 2)
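
A hedged usage sketch of the parser above: handle_command_packet is an illustrative wrapper, not part of the spinnman API. It assumes data is a raw bytestring received from a SpiNNaker board and that the command-message classes used by read_eieio_command_message are already in scope.

def handle_command_packet(data):
    """ Parse a received command packet and classify it by message type. """
    message = read_eieio_command_message(data, 0)
    if isinstance(message, SpinnakerRequestBuffers):
        # The board is asking the host for more input buffers
        return "request_buffers"
    elif isinstance(message, EventStopRequest):
        # End of all buffered input for this run
        return "event_stop"
    elif isinstance(message, PaddingRequest):
        # Filler only, nothing to act on
        return "padding"
    # Anything else comes back as a (subclass of) EIEIOCommandMessage
    return "other"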