Example #1
# Imports this snippet needs; the module paths are assumed from the
# docstring in Example #2 and SpiNNMan's layout of this era. The private
# _read_*_message helpers are defined alongside in the same module.
from spinnman.messages.eieio.eieio_type import EIEIOType
from spinnman.messages.eieio.data_messages.eieio_data_header \
    import EIEIODataHeader
from spinnman.messages.eieio.data_messages.eieio_data_message \
    import EIEIODataMessage
def read_eieio_data_message(data, offset):
    eieio_header = EIEIODataHeader.from_bytestring(data, offset)
    offset += eieio_header.size
    eieio_type = eieio_header.eieio_type
    prefix = eieio_header.prefix
    payload_base = eieio_header.payload_base
    prefix_type = eieio_header.prefix_type
    is_time = eieio_header.is_time
    if eieio_type == EIEIOType.KEY_16_BIT:
        return _read_16_bit_message(
            prefix, payload_base, prefix_type, is_time, data, offset,
            eieio_header)
    elif eieio_type == EIEIOType.KEY_PAYLOAD_16_BIT:
        return _read_16_bit_payload_message(
            prefix, payload_base, prefix_type, is_time, data, offset,
            eieio_header)
    elif eieio_type == EIEIOType.KEY_32_BIT:
        return _read_32_bit_message(
            prefix, payload_base, prefix_type, is_time, data, offset,
            eieio_header)
    elif eieio_type == EIEIOType.KEY_PAYLOAD_32_BIT:
        return _read_32_bit_payload_message(
            prefix, payload_base, prefix_type, is_time, data, offset,
            eieio_header)
    return EIEIODataMessage(eieio_header, data, offset)
Example #2
def read_eieio_data_message(data, offset):
    """ Reads the content of an EIEIO data message and returns an object\
        identifying the data which was contained in the packet

    :param data: data received from the network
    :type data: bytestring
    :param offset: offset at which the parsing operation should start
    :type offset: int
    :return: an object which inherits from EIEIODataMessage which contains\
            parsed data received from the network
    :rtype:\
            :py:class:`spinnman.messages.eieio.data_messages.eieio_data_message.EIEIODataMessage`
    """
    eieio_header = EIEIODataHeader.from_bytestring(data, offset)
    offset += eieio_header.size
    eieio_type = eieio_header.eieio_type
    prefix = eieio_header.prefix
    payload_base = eieio_header.payload_base
    prefix_type = eieio_header.prefix_type
    is_time = eieio_header.is_time
    if eieio_type == EIEIOType.KEY_16_BIT:
        return _read_16_bit_message(prefix, payload_base, prefix_type, is_time,
                                    data, offset, eieio_header)
    elif eieio_type == EIEIOType.KEY_PAYLOAD_16_BIT:
        return _read_16_bit_payload_message(prefix, payload_base, prefix_type,
                                            is_time, data, offset,
                                            eieio_header)
    elif eieio_type == EIEIOType.KEY_32_BIT:
        return _read_32_bit_message(prefix, payload_base, prefix_type, is_time,
                                    data, offset, eieio_header)
    elif eieio_type == EIEIOType.KEY_PAYLOAD_32_BIT:
        return _read_32_bit_payload_message(prefix, payload_base, prefix_type,
                                            is_time, data, offset,
                                            eieio_header)
    return EIEIODataMessage(eieio_header, data, offset)
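The if/elif chain in Examples #1 and #2 is a type-based dispatch: the header is parsed once, then one of four readers is chosen by eieio_type. The same pattern can be written with a lookup table. Below is a minimal self-contained sketch of that idea; the MsgType enum and the readers are hypothetical stand-ins, not SpiNNMan's API:

import struct
from enum import Enum

class MsgType(Enum):
    KEY_16_BIT = 0
    KEY_32_BIT = 2

# one reader per message type, keyed the way the if/elif chain branches
_READERS = {
    MsgType.KEY_16_BIT:
        lambda data, offset: struct.unpack_from("<H", data, offset)[0],
    MsgType.KEY_32_BIT:
        lambda data, offset: struct.unpack_from("<I", data, offset)[0],
}

def read_message(msg_type, data, offset):
    # a missing type raises KeyError here; the original instead falls
    # through to a generic EIEIODataMessage
    return _READERS[msg_type](data, offset)

print(read_message(MsgType.KEY_32_BIT, b"\x01\x00\x00\x00", 0))  # 1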
Example #3
    def get_spikes(self, label, buffer_manager, region, placements,
                   graph_mapper, application_vertex, base_key_function,
                   machine_time_step):

        results = list()
        missing_str = ""
        ms_per_tick = machine_time_step / 1000.0
        vertices = \
            graph_mapper.get_machine_vertices(application_vertex)
        progress_bar = ProgressBar(len(vertices),
                                   "Getting spikes for {}".format(label))

        for vertex in vertices:

            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            # read_all() already returns a bytestring; a str() wrapper here
            # (as in the original) would corrupt the data under Python 3
            spike_data = raw_spike_data.read_all()
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(spike_data,
                                        dtype="<u4",
                                        count=eieio_header.count,
                                        offset=offset)
                neuron_ids = ((keys - base_key_function(vertex)) +
                              vertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
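The core of the inner loop above is the key-to-neuron-ID translation: numpy.frombuffer views `count` little-endian 32-bit keys in place, the vertex's base key is subtracted, and the result is re-based onto the slice's lowest atom. A self-contained sketch of that step, with made-up base_key, lo_atom, and timestamp values:

import numpy

# two fake spike keys relative to a hypothetical base key
spike_data = numpy.array([0x1000 + 3, 0x1000 + 7], dtype="<u4").tobytes()
keys = numpy.frombuffer(spike_data, dtype="<u4", count=2, offset=0)
base_key, lo_atom = 0x1000, 64                 # hypothetical values
neuron_ids = (keys - base_key) + lo_atom
timestamps = numpy.repeat([2.5], len(keys))    # one timestamp per key, in ms
print(numpy.dstack((neuron_ids, timestamps))[0])   # [[67. 2.5] [71. 2.5]]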
Example #4
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex,
                   base_key_function):

        results = list()
        missing_str = ""
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))

        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(
                    x, y, p, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            spike_data = raw_spike_data.read_all()
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - base_key_function(subvertex)) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
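Every variant finishes with the same ordering step: numpy.lexsort sorts by its last key first, so passing (result[:, 1], result[:, 0]) orders the array primarily by neuron ID (column 0) and breaks ties by timestamp (column 1). A quick standalone check:

import numpy

result = numpy.array([[2, 1.0],
                      [1, 3.0],
                      [1, 0.5]])
order = numpy.lexsort((result[:, 1], result[:, 0]))  # last key = primary key
print(result[order])
# [[1.  0.5]
#  [1.  3. ]
#  [2.  1. ]]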
Example #5
    def get_spikes(self, label, transceiver, region, placements, graph_mapper,
                   partitionable_vertex):

        results = list()
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))

        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            # Read the spikes
            spike_data = recording_utils.get_data(
                transceiver, placement, region, subvertex.region_size)

            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = \
                    (keys - subvertex.base_key) + subvertex_slice.lo_atom
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        result = numpy.vstack(results)
        result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        return result
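The while loop in these examples treats the recording buffer as back-to-back variable-length records: parse a header, step the offset past it, read `count` fixed-size items, step past those too. The sketch below isolates that offset-cursor pattern, using a hypothetical 2-byte count header rather than a real EIEIO header:

import struct

# two fake records: a 2-byte little-endian count, then `count` 32-bit words
data = struct.pack("<H2I", 2, 10, 11) + struct.pack("<HI", 1, 12)

offset = 0
while offset < len(data):
    (count,) = struct.unpack_from("<H", data, offset)
    offset += 2                          # step over the header
    words = struct.unpack_from("<%dI" % count, data, offset)
    offset += count * 4                  # step over the payload
    print(count, words)
# 2 (10, 11)
# 1 (12,)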
Example #6
    def get_spikes(
            self, txrx, placements, graph_mapper, compatible_output=False):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells.   This is read directly from the memory for the board.

        :param transceiver:
        :param placements:
        :param graph_mapper:
        :param compatible_output:
        """

        logger.info("Getting spikes for {}".format(self._label))

        # Find all the sub-vertices that this population exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting spikes")
        results = list()
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the App Data for the core
            app_data_base_address = \
                txrx.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address,
                    self._SPIKE_SOURCE_REGIONS
                    .SPIKE_DATA_RECORDED_REGION.value)
            spike_region_base_address_buf = buffer(txrx.read_memory(
                x, y, spike_region_base_address_offset, 4))
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = buffer(txrx.read_memory(
                x, y, spike_region_base_address, 4))
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region we allocated for spikes
            send_buffer = self._get_spike_send_buffer(subvertex_slice)
            if number_of_bytes_written > send_buffer.total_region_size:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})"
                    .format(number_of_bytes_written,
                            send_buffer.total_region_size))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4"
                         .format(number_of_bytes_written,
                                 hex(number_of_bytes_written),
                                 hex(spike_region_base_address)))
            spike_data_block = txrx.read_memory(
                x, y, spike_region_base_address + 4, number_of_bytes_written)

            # translate block of spikes into EIEIO messages
            offset = 0
            while offset <= number_of_bytes_written - 4:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data_block, offset)
                offset += eieio_header.size
                timestamps = numpy.repeat([eieio_header.payload_base],
                                          eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data_block, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - subvertex.base_key) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])

            # complete the buffer
            progress_bar.update()
        progress_bar.end()

        result = numpy.vstack(results)
        result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        return result
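Example #6 differs from the buffered variants by reading SDRAM directly: each 32-bit value (the region's base address, then the byte count stored at that address) comes back as a raw little-endian word that struct.unpack_from decodes, and the payload is read starting 4 bytes past the count. A standalone sketch of that length-prefixed read, with fabricated memory contents:

import struct

# hypothetical region image: a 4-byte byte count, then the recorded words
memory = struct.pack("<I", 8) + struct.pack("<2I", 3, 4)
(n_bytes,) = struct.unpack_from("<I", memory, 0)   # bytes written after the prefix
payload = memory[4:4 + n_bytes]
print(n_bytes, struct.unpack("<2I", payload))      # 8 (3, 4)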