Example no. 1
    def __init__(self, key_prefix, count=0, data=None, offset=0):

        EIEIOWithPayloadDataMessage.__init__(
            self, EIEIODataHeader(EIEIOType.KEY_PAYLOAD_16_BIT,
                                  prefix=key_prefix, is_time=True,
                                  count=count),
            data, offset)
Example no. 2
    def __init__(self, key_prefix, count=0, data=None, offset=0):
        EIEIOWithPayloadDataMessage.__init__(
            self,
            EIEIODataHeader(EIEIOType.KEY_PAYLOAD_32_BIT,
                            prefix=key_prefix,
                            prefix_type=EIEIOPrefix.UPPER_HALF_WORD,
                            count=count), data, offset)
Example no. 3
def read_eieio_data_message(data, offset):
    """ Reads the content of an EIEIO data message and returns an object\
        identifying the data which was contained in the packet

    :param data: data received from the network
    :type data: bytestring
    :param offset: offset at which the parsing operation should start
    :type offset: int
    :return: an object which inherits from EIEIODataMessage which contains\
            parsed data received from the network
    :rtype:\
            :py:class:`spinnman.messages.eieio.data_messages.eieio_data_message.EIEIODataMessage`
    """
    eieio_header = EIEIODataHeader.from_bytestring(data, offset)
    offset += eieio_header.size
    eieio_type = eieio_header.eieio_type
    prefix = eieio_header.prefix
    payload_base = eieio_header.payload_base
    prefix_type = eieio_header.prefix_type
    is_time = eieio_header.is_time
    if eieio_type == EIEIOType.KEY_16_BIT:
        return _read_16_bit_message(prefix, payload_base, prefix_type, is_time,
                                    data, offset, eieio_header)
    elif eieio_type == EIEIOType.KEY_PAYLOAD_16_BIT:
        return _read_16_bit_payload_message(prefix, payload_base, prefix_type,
                                            is_time, data, offset,
                                            eieio_header)
    elif eieio_type == EIEIOType.KEY_32_BIT:
        return _read_32_bit_message(prefix, payload_base, prefix_type, is_time,
                                    data, offset, eieio_header)
    elif eieio_type == EIEIOType.KEY_PAYLOAD_32_BIT:
        return _read_32_bit_payload_message(prefix, payload_base, prefix_type,
                                            is_time, data, offset,
                                            eieio_header)
    return EIEIODataMessage(eieio_header, data, offset)
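
A minimal usage sketch of the dispatcher above. It assumes the element-iterator API (is_next_element / next_element) that EIEIODataMessage and its subclasses expose in spinnman; received_bytes is a hypothetical buffer holding one packet read from the network.

# Hedged sketch: received_bytes stands in for one packet's raw payload
message = read_eieio_data_message(received_bytes, 0)
while message.is_next_element:
    element = message.next_element
    print(element.key)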
Example no. 4
    def __init__(self, key_prefix, payload_prefix, count=0, data=None,
                 offset=0):
        EIEIOWithoutPayloadDataMessage.__init__(
            self, EIEIODataHeader(EIEIOType.KEY_32_BIT,
                                  payload_base=payload_prefix,
                                  prefix=key_prefix, count=count),
            data, offset)
Example no. 5
def read_eieio_data_message(data, offset):
    eieio_header = EIEIODataHeader.from_bytestring(data, offset)
    offset += eieio_header.size
    eieio_type = eieio_header.eieio_type
    prefix = eieio_header.prefix
    payload_base = eieio_header.payload_base
    prefix_type = eieio_header.prefix_type
    is_time = eieio_header.is_time
    if eieio_type == EIEIOType.KEY_16_BIT:
        return _read_16_bit_message(
            prefix, payload_base, prefix_type, is_time, data, offset,
            eieio_header)
    elif eieio_type == EIEIOType.KEY_PAYLOAD_16_BIT:
        return _read_16_bit_payload_message(
            prefix, payload_base, prefix_type, is_time, data, offset,
            eieio_header)
    elif eieio_type == EIEIOType.KEY_32_BIT:
        return _read_32_bit_message(
            prefix, payload_base, prefix_type, is_time, data, offset,
            eieio_header)
    elif eieio_type == EIEIOType.KEY_PAYLOAD_32_BIT:
        return _read_32_bit_payload_message(
            prefix, payload_base, prefix_type, is_time, data, offset,
            eieio_header)
    return EIEIODataMessage(eieio_header, data, offset)
Example no. 6
    def __init__(self, key_prefix, timestamp, count=0, data=None, offset=0):
        EIEIOWithoutPayloadDataMessage.__init__(
            self,
            EIEIODataHeader(EIEIOType.KEY_32_BIT,
                            payload_base=timestamp,
                            is_time=True,
                            prefix=key_prefix,
                            count=count), data, offset)
Example no. 7
    def get_spikes(self, label, buffer_manager, region, placements,
                   graph_mapper, application_vertex, base_key_function,
                   machine_time_step):

        results = list()
        missing_str = ""
        ms_per_tick = machine_time_step / 1000.0
        vertices = \
            graph_mapper.get_machine_vertices(application_vertex)
        progress_bar = ProgressBar(len(vertices),
                                   "Getting spikes for {}".format(label))

        for vertex in vertices:

            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            spike_data = str(raw_spike_data.read_all())
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(spike_data,
                                        dtype="<u4",
                                        count=eieio_header.count,
                                        offset=offset)
                neuron_ids = ((keys - base_key_function(vertex)) +
                              vertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
Example no. 8
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex,
                   base_key_function):

        results = list()
        missing_str = ""
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))

        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(
                    x, y, p, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            spike_data = raw_spike_data.read_all()
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - base_key_function(subvertex)) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
Example no. 9
    def __init__(self, key_prefix, count=0, data=None, offset=0):

        EIEIOWithoutPayloadDataMessage.__init__(
            self,
            EIEIODataHeader(EIEIOType.KEY_16_BIT,
                            is_time=True,
                            prefix=key_prefix,
                            prefix_type=EIEIOPrefix.UPPER_HALF_WORD,
                            count=count), data, offset)
Example no. 10
    def min_packet_length(eieio_type, is_prefix=False, is_payload_base=False):
        """ The minimum length of a message with the given header, in bytes

        :param eieio_type: the type of message
        :type eieio_type:\
                    :py:class:`spinnman.messages.eieio.eieio_type.EIEIOType`
        :param is_prefix: True if there is a prefix, False otherwise
        :type is_prefix: bool
        :param is_payload_base: True if there is a payload base, False\
                    otherwise
        :type is_payload_base: bool
        :return: The minimum size of the packet in bytes
        :rtype: int
        """
        header_size = EIEIODataHeader.get_header_size(eieio_type, is_prefix,
                                                      is_payload_base)
        return header_size + eieio_type.payload_bytes
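
A hedged worked example of the arithmetic above, assuming this staticmethod lives on EIEIODataMessage (the snippet does not show its class) and, for illustration only, a 2-byte basic header and a 4-byte per-entry payload for KEY_PAYLOAD_32_BIT:

# 2 bytes (header, no prefix, no payload base) + 4 bytes (payload) = 6 bytes
size = EIEIODataMessage.min_packet_length(EIEIOType.KEY_PAYLOAD_32_BIT)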
Example no. 12
    def get_spikes(self, label, transceiver, region, placements, graph_mapper,
                   partitionable_vertex):

        results = list()
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))

        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            # Read the spikes
            spike_data = recording_utils.get_data(
                transceiver, placement, region, subvertex.region_size)

            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = \
                    (keys - subvertex.base_key) + subvertex_slice.lo_atom
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        result = numpy.vstack(results)
        result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        return result
Example no. 13
    def __init__(self, payload_prefix, count=0, data=None, offset=0):
        EIEIOWithPayloadDataMessage.__init__(
            self, EIEIODataHeader(EIEIOType.KEY_PAYLOAD_16_BIT,
                                  payload_base=payload_prefix, count=count),
            data, offset)
Example no. 14
    def __init__(self, count=0, data=None, offset=0):
        EIEIOWithPayloadDataMessage.__init__(
            self, EIEIODataHeader(EIEIOType.KEY_PAYLOAD_32_BIT, count=count),
            data, offset)
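
The two constructors above differ only in the header they build. A hedged usage sketch, constructing the message from its header directly (these snippets omit the subclass names) and assuming EIEIOWithPayloadDataMessage exposes add_key_and_payload(key, payload) as in spinnman:

# A 16-bit key+payload message whose payload prefix applies to every element
header = EIEIODataHeader(EIEIOType.KEY_PAYLOAD_16_BIT, payload_base=0x1234)
message = EIEIOWithPayloadDataMessage(header)
message.add_key_and_payload(0x0001, 0x0002)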
Example no. 15
    import StopRequests

from spynnaker.pyNN.exceptions import BufferableRegionTooSmall
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN.buffer_management.storage_objects.buffers_sent_deque\
    import BuffersSentDeque


logger = logging.getLogger(__name__)

# The minimum size of any message - this is the headers plus one entry
_MIN_MESSAGE_SIZE = (EIEIO32BitTimedPayloadPrefixDataMessage
                     .get_min_packet_length())

# The size of the header of a message
_HEADER_SIZE = EIEIODataHeader.get_header_size(EIEIOType.KEY_32_BIT,
                                               is_payload_base=True)

# The number of bytes in each key to be sent
_N_BYTES_PER_KEY = EIEIOType.KEY_32_BIT.key_bytes

# The number of keys allowed (different from the actual number as there is an
# additional header)
_N_KEYS_PER_MESSAGE = (constants.UDP_MESSAGE_MAX_SIZE -
                       (HostSendSequencedData.get_min_packet_length() +
                        _HEADER_SIZE)) / _N_BYTES_PER_KEY
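# Hedged worked example of the line above: if UDP_MESSAGE_MAX_SIZE were 280
# bytes and the sequenced-data wrapper plus the EIEIO header cost, say, 10
# bytes, then (280 - 10) // 4 = 67 four-byte keys would fit in each message.
# (These byte counts are illustrative assumptions, not spinnman's values.)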


class BufferManager(object):
    """ Manager of send buffers
    """
Example no. 16
class BufferedSendingRegion(object):
    """ A set of keys to be sent at given timestamps for a given region of\
        data.  Note that keys must be added in timestamp order or else an\
        exception will be raised
    """

    _HEADER_SIZE = EIEIODataHeader.get_header_size(EIEIOType.KEY_32_BIT,
                                                   is_payload_base=True)

    # The number of bytes in each key to be sent
    _N_BYTES_PER_KEY = EIEIOType.KEY_32_BIT.key_bytes  # @UndefinedVariable

    # The number of keys allowed (different from the actual number as there is
    #  an additional header)
    _N_KEYS_PER_MESSAGE = (constants.UDP_MESSAGE_MAX_SIZE -
                           (HostSendSequencedData.get_min_packet_length() +
                            _HEADER_SIZE)) / _N_BYTES_PER_KEY

    def __init__(self, max_buffer_size):
        self._max_size_of_buffer = max_buffer_size
        self.clear()

    @property
    def buffer_size(self):
        """
        property method for getting the max size of this buffer
        :return:
        """
        if self._buffer_size is None:
            self._calculate_sizes()
        return self._buffer_size

    @property
    def total_region_size(self):
        """ Get the max size of this region
        :return:
        """
        if self._total_region_size is None:
            self._calculate_sizes()
        return self._total_region_size

    @property
    def max_buffer_size_possible(self):
        """ Get the max possible size of a buffer from this region
        :return:
        """
        return self._max_size_of_buffer

    def _calculate_sizes(self):
        """ Deduce how big the buffer and the region needs to be
        :return:
        """
        size = 0
        for timestamp in self._timestamps:
            n_keys = self.get_n_keys(timestamp)
            size += self.get_n_bytes(n_keys)
        size += EventStopRequest.get_min_packet_length()
        if size > self._max_size_of_buffer:
            self._buffer_size = self._max_size_of_buffer
        else:
            self._buffer_size = size
        self._total_region_size = size

    def get_n_bytes(self, n_keys):
        """ Get the number of bytes used by a given number of keys

        :param n_keys: The number of keys
        :type n_keys: int
        :return: The number of bytes used, including message headers
        :rtype: int
        """

        # Get the total number of messages
        n_messages = int(math.ceil(float(n_keys) / self._N_KEYS_PER_MESSAGE))

        # Add up the bytes
        return ((self._HEADER_SIZE * n_messages) +
                (n_keys * self._N_BYTES_PER_KEY))
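
    # Hedged worked example: under the illustrative assumptions
    # _N_KEYS_PER_MESSAGE = 60 and _HEADER_SIZE = 6, get_n_bytes(150) needs
    # ceil(150 / 60) = 3 messages, i.e. 3 * 6 + 150 * 4 = 618 bytes.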

    def add_key(self, timestamp, key):
        """ Add a key to be sent at a given time

        :param timestamp: The time at which the key is to be sent
        :type timestamp: int
        :param key: The key to send
        :type key: int
        """
        if timestamp not in self._buffer:
            bisect.insort(self._timestamps, timestamp)
            self._buffer[timestamp] = list()
        self._buffer[timestamp].append(key)
        self._total_region_size = None
        self._buffer_size = None

    def add_keys(self, timestamp, keys):
        """ Add a set of keys to be sent at the given time

        :param timestamp: The time at which the keys are to be sent
        :type timestamp: int
        :param keys: The keys to send
        :type keys: iterable of int
        """
        for key in keys:
            self.add_key(timestamp, key)

    @property
    def n_timestamps(self):
        """ The number of timestamps available

        :rtype: int
        """
        return len(self._timestamps)

    @property
    def timestamps(self):
        """ The timestamps for which there are keys

        :rtype: iterable of int
        """
        return self._timestamps

    def get_n_keys(self, timestamp):
        """ Get the number of keys for a given timestamp

        :param timestamp: the timestamp for which to count the keys
        :type timestamp: int
        :return: the number of keys at that timestamp
        :rtype: int
        """
        if timestamp in self._buffer:
            return len(self._buffer[timestamp])
        return 0

    @property
    def is_next_timestamp(self):
        """ Determines if the region is empty
        :return: True if the region is empty, false otherwise
        :rtype: bool
        """
        return self._current_timestamp_pos < len(self._timestamps)

    @property
    def next_timestamp(self):
        """ The next timestamp of the data to be sent, or None if no more data

        :rtype: int or None
        """
        if self.is_next_timestamp:
            return self._timestamps[self._current_timestamp_pos]
        return None

    def is_next_key(self, timestamp):
        """ Determine if there is another key for the given timestamp

        :param timestamp: the timestamp to check for remaining keys to\
                transmit
        :rtype: bool
        """
        if timestamp in self._buffer:
            return len(self._buffer[timestamp]) > 0
        return False

    @property
    def next_key(self):
        """ The next key to be sent

        :rtype: int
        """
        next_timestamp = self.next_timestamp
        keys = self._buffer[next_timestamp]
        key = keys.pop()
        if len(keys) == 0:
            del self._buffer[next_timestamp]
            self._current_timestamp_pos += 1
        return key

    @property
    def current_timestamp(self):
        """ Get the current timestamp in the iterator
        """
        return self._current_timestamp_pos

    def rewind(self):
        """ Rewind the buffer to initial position.
        """
        self._current_timestamp_pos = 0

    def clear(self):
        """ Clears the buffer
        """

        # A dictionary of timestamp -> list of keys
        self._buffer = dict()

        # A list of timestamps
        self._timestamps = list()

        # The current position in the list of timestamps
        self._current_timestamp_pos = 0

        self._buffer_size = None

        self._total_region_size = None
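
A hedged usage sketch of BufferedSendingRegion: keys are queued per timestamp with add_key / add_keys, then drained in timestamp order through the next_timestamp / is_next_key / next_key API shown above. The buffer size and keys below are arbitrary illustrative values.

region = BufferedSendingRegion(max_buffer_size=1500)
region.add_keys(0, [0x70000, 0x70001])   # two keys at timestamp 0
region.add_key(5, 0x70002)               # one key at timestamp 5

while region.is_next_timestamp:
    timestamp = region.next_timestamp
    while region.is_next_key(timestamp):
        key = region.next_key   # pops a key; advances once a time is drained
        # ... pack key into an EIEIO message for this timestamp ...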
Example no. 17
    def get_spikes(
            self, txrx, placements, graph_mapper, compatible_output=False):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells.   This is read directly from the memory for the board.

        :param transceiver:
        :param placements:
        :param graph_mapper:
        :param compatible_output:
        """

        logger.info("Getting spikes for {}".format(self._label))

        # Find all the sub-vertices that this population exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting spikes")
        results = list()
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the App Data for the core
            app_data_base_address = \
                txrx.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address,
                    self._SPIKE_SOURCE_REGIONS
                    .SPIKE_DATA_RECORDED_REGION.value)
            spike_region_base_address_buf = buffer(txrx.read_memory(
                x, y, spike_region_base_address_offset, 4))
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = buffer(txrx.read_memory(
                x, y, spike_region_base_address, 4))
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region allocated for spikes
            send_buffer = self._get_spike_send_buffer(subvertex_slice)
            if number_of_bytes_written > send_buffer.total_region_size:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})"
                    .format(number_of_bytes_written,
                            send_buffer.total_region_size))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4"
                         .format(number_of_bytes_written,
                                 hex(number_of_bytes_written),
                                 hex(spike_region_base_address)))
            spike_data_block = txrx.read_memory(
                x, y, spike_region_base_address + 4, number_of_bytes_written)

            # translate block of spikes into EIEIO messages
            offset = 0
            while offset <= number_of_bytes_written - 4:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data_block, offset)
                offset += eieio_header.size
                timestamps = numpy.repeat([eieio_header.payload_base],
                                          eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data_block, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - subvertex.base_key) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])

            # update the progress bar for this subvertex
            progress_bar.update()
        progress_bar.end()

        result = numpy.vstack(results)
        result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        return result