def _regenerate_data_spec_for_vertices(self, placement, vertex):
        """
        :param ~.Placement placement:
        :param ~.AbstractVertex vertex:
        :rtype: bool
        """
        # If the vertex doesn't regenerate, skip
        if not isinstance(vertex, AbstractRewritesDataSpecification):
            return False

        # If the vertex doesn't require regeneration, skip
        if not vertex.requires_memory_regions_to_be_reloaded():
            return True

        # build the writers for the reports and data
        spec_file, spec = get_data_spec_and_file_writer_filename(
            placement.x, placement.y, placement.p, self._host, self._rpt_dir,
            self._write_text, self._data_dir)

        # Execute the regeneration
        vertex.regenerate_data_specification(spec, placement)

        # execute the spec
        with FileDataReader(spec_file) as spec_reader:
            data_spec_executor = DataSpecificationExecutor(
                spec_reader, SDRAM.max_sdram_found)
            data_spec_executor.execute()
        try:
            os.remove(spec_file)
        except Exception:  # pylint: disable=broad-except
            # Ignore the deletion of files as non-critical
            pass

        # Read the region table for the placement
        regions_base_address = self._txrx.get_cpu_information_from_core(
            placement.x, placement.y, placement.p).user[0]
        start_region = get_region_base_address_offset(regions_base_address, 0)
        table_size = get_region_base_address_offset(
            regions_base_address, MAX_MEM_REGIONS) - start_region
        offsets = REGION_STRUCT.unpack_from(
            self._txrx.read_memory(placement.x, placement.y, start_region,
                                   table_size))

        # Write the regions to the machine
        for i, region in enumerate(data_spec_executor.dsef.mem_regions):
            if region is not None and not region.unfilled:
                self._txrx.write_memory(
                    placement.x, placement.y, offsets[i],
                    region.region_data[:region.max_write_pointer])

        return True
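
All of these snippets lean on the same indirection: user register 0 of a core holds the application data base address, and get_region_base_address_offset turns that base plus a region ID into the address of the region's 4-byte entry in the pointer table. Below is a minimal sketch of the round trip, assuming get_region_base_address_offset is imported as in the examples; read_region_pointer is an illustrative name, not a toolchain function:

import struct

def read_region_pointer(txrx, x, y, p, region):
    # user[0] of the core points at the application data / pointer table
    app_data_base = txrx.get_cpu_information_from_core(x, y, p).user[0]

    # each region has one little-endian 32-bit entry in the table
    entry_address = get_region_base_address_offset(app_data_base, region)
    word = txrx.read_memory(x, y, entry_address, 4)
    return struct.unpack("<I", word)[0]

Note that the examples below disagree on what the stored word means: some treat it as an absolute address, others add it to the application data base address; which is right depends on the toolchain version the snippet was written against.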
Example #2
    def _regenerate_data_spec_for_vertices(transceiver, placement, vertex,
                                           hostname,
                                           reloaded_dsg_report_files_file_path,
                                           write_text_specs,
                                           reloaded_dsg_data_files_file_path):
        # pylint: disable=too-many-arguments, too-many-locals

        # If the vertex doesn't regenerate, skip
        if not isinstance(vertex, AbstractRewritesDataSpecification):
            return False

        # If the vertex doesn't require regeneration, skip
        if not vertex.requires_memory_regions_to_be_reloaded():
            return True

        # build the writers for the reports and data
        spec_file, spec = utility_calls.get_data_spec_and_file_writer_filename(
            placement.x, placement.y, placement.p, hostname,
            reloaded_dsg_report_files_file_path, write_text_specs,
            reloaded_dsg_data_files_file_path)

        # Execute the regeneration
        vertex.regenerate_data_specification(spec, placement)

        # execute the spec, closing the reader when done
        with FileDataReader(spec_file) as spec_reader:
            data_spec_executor = DataSpecificationExecutor(
                spec_reader, SDRAM.DEFAULT_SDRAM_BYTES)
            data_spec_executor.execute()

        # Read the region table for the placement
        regions_base_address = transceiver.get_cpu_information_from_core(
            placement.x, placement.y, placement.p).user[0]
        start_region = utility_calls.get_region_base_address_offset(
            regions_base_address, 0)
        table_size = utility_calls.get_region_base_address_offset(
            regions_base_address, MAX_MEM_REGIONS) - start_region
        offsets = struct.unpack_from(
            "<{}I".format(MAX_MEM_REGIONS),
            transceiver.read_memory(placement.x, placement.y, start_region,
                                    table_size))

        # Write the regions to the machine
        for i, region in enumerate(data_spec_executor.dsef.mem_regions):
            if region is not None and not region.unfilled:
                transceiver.write_memory(
                    placement.x, placement.y, offsets[i],
                    region.region_data[:region.max_write_pointer])

        return True
Example #3
def get_data(transceiver, placement, region, region_size):
    """ Get the recorded data from a recording region of a core

    :param transceiver: the python interface to the SpiNNaker machine
    :param placement: the placement of the vertex to read from
    :param region: the recording region ID
    :param region_size: the allocated size of the region, in bytes
    """

    (x, y, p) = placement.x, placement.y, placement.p

    app_data_base_address = transceiver.get_cpu_information_from_core(
        x, y, p).user[0]
    region_base_address_offset = utility_calls.get_region_base_address_offset(
        app_data_base_address, region)
    region_base_address_buf = buffer(transceiver.read_memory(
        x, y, region_base_address_offset, 4))
    region_base_address = struct.unpack_from("<I", region_base_address_buf)[0]
    region_base_address += app_data_base_address
    number_of_bytes_written_buf = buffer(transceiver.read_memory(
        x, y, region_base_address, 4))
    number_of_bytes_written = struct.unpack_from(
        "<I", number_of_bytes_written_buf)[0]

    # Subtract 4 for the word representing the size itself
    expected_size = region_size - _RECORDING_COUNT_SIZE
    if number_of_bytes_written > expected_size:
        raise exceptions.MemReadException(
            "Expected {} bytes but read {}".format(
                expected_size, number_of_bytes_written))

    return transceiver.read_memory(
        x, y, region_base_address + 4, number_of_bytes_written)
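
The layout get_data assumes is a recording region that starts with one word holding the number of bytes written, followed by the data itself. A self-contained sketch of that parsing, given the region contents as bytes (parse_recording_region is an illustrative helper, not a toolchain function):

import struct

def parse_recording_region(blob):
    # word 0 is the number of bytes recorded; the data follows it
    n_bytes = struct.unpack_from("<I", blob)[0]
    return blob[4:4 + n_bytes]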
Example #4
    def write_provenance_data_in_xml(self, file_path, transceiver,
                                     placement=None):
        """ Extracts provenance data from the SDRAM of the core and stores it\
            in an xml file for end user digestion

        :param file_path: the file path to the xml document
        :param transceiver: the spinnman interface object
        :param placement: the placement object for this subvertex
        :return: None
        """
        if placement is None:
            raise ConfigurationException(
                "To acquire provenance data from the live packet gatherer,"
                "you must provide a placement object that points to where the "
                "live packet gatherer resides on the spinnaker machine")

        # Get the App Data for the core
        app_data_base_address = transceiver.get_cpu_information_from_core(
            placement.x, placement.y, placement.p).user[0]

        # Get the provenance region base address
        provanence_data_region_base_address_offset = \
            dsg_utility_calls.get_region_base_address_offset(
                app_data_base_address,
                self._LIVE_DATA_GATHER_REGIONS.PROVANENCE.value)
        provanence_data_region_base_address_buf = \
            buffer(transceiver.read_memory(
                placement.x, placement.y,
                provanence_data_region_base_address_offset, 4))
        provanence_data_region_base_address = \
            struct.unpack("I", provanence_data_region_base_address_buf)[0]
        provanence_data_region_base_address += app_data_base_address

        # read in the provenance data
        provanence_data_region_contents_buff = \
            buffer(transceiver.read_memory(
                placement.x, placement.y, provanence_data_region_base_address,
                self._PROVANENCE_REGION_SIZE))
        provanence_data_region_contents = \
            struct.unpack("<II", provanence_data_region_contents_buff)

        # create provenance data xml form
        from lxml import etree
        root = etree.Element("Live_packet_gatherer_located_at_{}_{}_{}"
                             .format(placement.x, placement.y, placement.p))
        none_payload_provanence_data = \
            etree.SubElement(root, "lost_packets_without_payload")
        payload_provanence_data = \
            etree.SubElement(root, "lost_packets_with_payload")
        none_payload_provanence_data.text = \
            str(provanence_data_region_contents[0])
        payload_provanence_data.text = \
            str(provanence_data_region_contents[1])

        # write xml form into the file provided
        with open(file_path, "w") as writer:
            writer.write(etree.tostring(root, pretty_print=True))
Example #5
def get_data_region_address(transceiver, placement, region):
    # Get the App Data for the core
    app_data_base_address = transceiver.get_cpu_information_from_core(
        placement.x, placement.y, placement.p).user[0]

    # Get the base address of the given region
    address_location = get_region_base_address_offset(app_data_base_address,
                                                      region.value)
    return transceiver.read_word(placement.x, placement.y, address_location)
Example #6
    def test_with_application_vertices(self):
        """ Test that an application vertex's data is rewritten correctly
        """
        # Create a default SDRAM to set the max to default
        SDRAM()
        reload_region_data = [(0, [0] * 10), (1, [1] * 20)]
        vertex = _TestApplicationVertex(10, reload_region_data)
        m_slice_1 = Slice(0, 4)
        m_slice_2 = Slice(5, 9)
        m_vertex_1 = vertex.create_machine_vertex(m_slice_1, None, None, None)
        m_vertex_2 = vertex.create_machine_vertex(m_slice_2, None, None, None)

        graph_mapper = GraphMapper()
        graph_mapper.add_vertex_mapping(m_vertex_1, m_slice_1, vertex)
        graph_mapper.add_vertex_mapping(m_vertex_2, m_slice_2, vertex)

        placements = Placements(
            [Placement(m_vertex_1, 0, 0, 1),
             Placement(m_vertex_2, 0, 0, 2)])

        user_0_addresses = {
            (placement.x, placement.y, placement.p): i * 1000
            for i, placement in enumerate(placements.placements)
        }
        region_addresses = [i for i in range(MAX_MEM_REGIONS)]
        transceiver = _MockTransceiver(user_0_addresses, region_addresses)

        reloader = DSGRegionReloader()
        reloader.__call__(transceiver, placements, "localhost", "test", False,
                          "test", graph_mapper)

        regions_rewritten = transceiver.regions_rewritten

        # Check that the number of times the data has been regenerated is
        # correct
        self.assertEqual(vertex.regenerate_call_count, placements.n_placements)

        # Check that the number of regions rewritten is correct
        self.assertEqual(len(transceiver.regions_rewritten),
                         placements.n_placements * len(reload_region_data))

        # Check that the data rewritten is correct
        for i, placement in enumerate(placements.placements):
            user_0_address = user_0_addresses[placement.x, placement.y,
                                              placement.p]
            for j in range(len(reload_region_data)):
                pos = (i * len(reload_region_data)) + j
                region, data = reload_region_data[j]
                address = get_region_base_address_offset(
                    user_0_address, 0) + region_addresses[region]
                data = bytearray(numpy.array(data, dtype="uint32").tobytes())

                # Check that the base address and data written is correct
                self.assertEqual(regions_rewritten[pos], (address, data))

        # Delete data files
        shutil.rmtree("test")
Example #7
    def _get_data_region_address(transceiver, placement):
        # Get the App Data for the core
        app_data_base_address = transceiver.get_cpu_information_from_core(
            placement.x, placement.y, placement.p).user[0]

        # Get the data region base address
        base_address_offset = get_region_base_address_offset(
            app_data_base_address, SDRAMWriter.DATA_REGIONS.DATA.value)
        return _ONE_WORD.unpack(
            transceiver.read_memory(placement.x, placement.y,
                                    base_address_offset, _ONE_WORD.size))[0]
Example #8
    def _regenerate_data_spec_for_vertices(self, placement, vertex):
        # If the vertex doesn't regenerate, skip
        if not isinstance(vertex, AbstractRewritesDataSpecification):
            return False

        # If the vertex doesn't require regeneration, skip
        if not vertex.requires_memory_regions_to_be_reloaded():
            return True

        # build the writers for the reports and data
        spec_file, spec = get_data_spec_and_file_writer_filename(
            placement.x, placement.y, placement.p, self._hostname,
            self._report_dir, self._write_text, self._app_data_dir)

        # Execute the regeneration
        vertex.regenerate_data_specification(spec, placement)

        # execute the spec, closing the reader when done
        with FileDataReader(spec_file) as spec_reader:
            data_spec_executor = DataSpecificationExecutor(
                spec_reader, SDRAM.DEFAULT_SDRAM_BYTES)
            data_spec_executor.execute()

        # Read the region table for the placement
        regions_base_address = self._txrx.get_cpu_information_from_core(
            placement.x, placement.y, placement.p).user[0]
        start_region = get_region_base_address_offset(regions_base_address, 0)
        table_size = get_region_base_address_offset(
            regions_base_address, MAX_MEM_REGIONS) - start_region
        offsets = REGION_STRUCT.unpack_from(
            self._txrx.read_memory(
                placement.x, placement.y, start_region, table_size))

        # Write the regions to the machine
        for i, region in enumerate(data_spec_executor.dsef.mem_regions):
            if region is not None and not region.unfilled:
                self._txrx.write_memory(
                    placement.x, placement.y, offsets[i],
                    region.region_data[:region.max_write_pointer])

        return True
Example #9
    def _get_provenance_region_address(self, transceiver, placement):

        # Get the App Data for the core
        app_data_base_address = transceiver.get_cpu_information_from_core(
            placement.x, placement.y, placement.p).user[0]

        # Get the provenance region base address
        base_address_offset = get_region_base_address_offset(
            app_data_base_address, self._provenance_region_id)
        base_address = transceiver.read_memory(placement.x, placement.y,
                                               base_address_offset, 4)
        return _ONE_WORD.unpack(base_address)[0]
Example #11
    def _get_provenance_region_address(self, transceiver, placement):

        # Get the App Data for the core
        app_data_base_address = transceiver.get_cpu_information_from_core(
            placement.x, placement.y, placement.p).user[0]

        # Get the provenance region base address
        provenance_data_region_base_address_offset = \
            dsg_utility_calls.get_region_base_address_offset(
                app_data_base_address, self._provenance_region_id)
        provenance_data_region_base_address_buff = buffer(
            transceiver.read_memory(
                placement.x, placement.y,
                provenance_data_region_base_address_offset, 4))
        provenance_data_region_base_address = struct.unpack(
            "<I", provenance_data_region_base_address_buff)[0]
        return provenance_data_region_base_address
Example #12
    def locate_master_pop_table_base_address(self, x, y, p, transceiver,
                                             master_pop_table_region):
        """
        :param x: x coord of the chip from which this master pop table is \
        being read
        :type x: int
        :param y: y coord of the chip from which this master pop table is \
        being read
        :type y: int
        :param p: processor from which this master pop table is being read
        :type p: int
        :param transceiver: the transceiver object
        :type transceiver: spinnman.transceiver.Transceiver
        :param master_pop_table_region: the region in which the master pop\
         table resides
        :type master_pop_table_region: int
        :return: the master pop table base address and the app data base\
         address
        """
        # Get the App Data base address for the core
        # (location where this cores memory starts in
        # sdram and region table)
        app_data_base_address = \
            transceiver.get_cpu_information_from_core(x, y, p).user[0]

        # Get the memory address of the master pop table region
        master_pop_region = master_pop_table_region

        master_region_base_address_address = \
            dsg_utility.get_region_base_address_offset(
                app_data_base_address, master_pop_region)

        master_region_base_address_offset = \
            self.read_and_convert(x, y, master_region_base_address_address,
                                  4, "<I", transceiver)

        master_region_base_address =\
            master_region_base_address_offset + app_data_base_address

        return master_region_base_address, app_data_base_address
Example #13
    def locate_master_pop_table_base_address(self, x, y, p, transceiver,
                                             master_pop_table_region):
        """
        :param x: x coord of the chip from which this master pop table is \
        being read
        :type x: int
        :param y: y coord of the chip from which this master pop table is \
        being read
        :type y: int
        :param p: processor from which this master pop table is being read
        :type p: int
        :param transceiver: the transceiver object
        :type transceiver: spinnman.transceiver.Transceiver
        :param master_pop_table_region: the region in which the master pop\
         table resides
        :type master_pop_table_region: int
        :return: the master pop table base address and the app data base\
         address
        """
        # Get the App Data base address for the core
        # (location where this cores memory starts in
        # sdram and region table)
        app_data_base_address = \
            transceiver.get_cpu_information_from_core(x, y, p).user[0]

        # Get the memory address of the master pop table region
        master_pop_region = master_pop_table_region

        master_region_base_address_address = \
            dsg_utility.get_region_base_address_offset(
                app_data_base_address, master_pop_region)

        master_region_base_address_offset = helpful_functions.read_data(
            x, y, master_region_base_address_address, 4, "<I", transceiver)

        master_region_base_address =\
            master_region_base_address_offset + app_data_base_address

        return master_region_base_address, app_data_base_address
Example #14
def locate_memory_region_for_placement(placement, region, transceiver):
    """ Get the address of a region for a placement

    :param region: the region to locate the base address of
    :type region: int
    :param placement: the placement object to get the region address of
    :type placement: pacman.model.placements.Placement
    :param transceiver: the python interface to the SpiNNaker machine
    :type transceiver: spinnman.transceiver.Transceiver
    :return: the base address of the region
    """
    regions_base_address = transceiver.get_cpu_information_from_core(
        placement.x, placement.y, placement.p).user[0]

    # Get the position of the region in the pointer table
    region_offset = utility_calls.get_region_base_address_offset(
        regions_base_address, region)

    # Get the actual address of the region
    region_address = transceiver.read_memory(
        placement.x, placement.y, region_offset, 4)
    return _ONE_WORD.unpack_from(region_address)[0]
Example #16
    def _locate_region_address(self, region, vertex):
        """ Get the address of a region for a vertex

        :param region: the region to locate the base address of
        :type region: int
        :param vertex: the vertex to load a buffer for
        :type vertex:\
                    :py:class:`spynnaker.pyNN.models.abstract_models.buffer_models.abstract_sends_buffers_from_host_partitioned_vertex.AbstractSendsBuffersFromHostPartitionedVertex`
        :return: the base address of the region for the vertex
        """
        placement = self._placements.get_placement_of_subvertex(vertex)
        app_data_base_address = \
            self._transceiver.get_cpu_information_from_core(
                placement.x, placement.y, placement.p).user[0]

        # Get the position of the region in the pointer table
        region_offset_in_pointer_table = \
            dsg_utilities.get_region_base_address_offset(
                app_data_base_address, region)
        region_offset = str(list(self._transceiver.read_memory(
            placement.x, placement.y, region_offset_in_pointer_table, 4))[0])
        return struct.unpack("<I", region_offset)[0] + app_data_base_address
Example #17
def locate_memory_region_for_placement(placement, region, transceiver):
    """ Get the address of a region for a placement

    :param region: the region to locate the base address of
    :type region: int
    :param placement: the placement object to get the region address of
    :type placement: pacman.model.placements.Placement
    :param transceiver: the python interface to the SpiNNaker machine
    :type transceiver: spinnman.transceiver.Transceiver
    :return: the base address of the region
    """
    regions_base_address = transceiver.get_cpu_information_from_core(
        placement.x, placement.y, placement.p).user[0]

    # Get the position of the region in the pointer table
    region_offset_in_pointer_table = \
        utility_calls.get_region_base_address_offset(
            regions_base_address, region)
    region_address = buffer(
        transceiver.read_memory(placement.x, placement.y,
                                region_offset_in_pointer_table, 4))
    region_address_decoded = struct.unpack_from("<I", region_address)[0]
    return region_address_decoded
Example #18
    def _get_spikes(
            self, graph_mapper, placements, transceiver, compatible_output,
            spike_recording_region, sub_vertex_out_spike_bytes_function):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells. This is read directly from the memory of the board.
        """

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(
            len(subvertices), "Getting spikes for {}".format(self._label))
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the App Data for the core
            app_data_base_address = \
                transceiver.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, spike_recording_region)
            spike_region_base_address_buf = buffer(transceiver.read_memory(
                x, y, spike_region_base_address_offset, 4))
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = buffer(transceiver.read_memory(
                x, y, spike_region_base_address, 4))
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region we allocated for spikes
            out_spike_bytes = sub_vertex_out_spike_bytes_function(
                subvertex, subvertex_slice)
            size_of_region = self.get_recording_region_size(out_spike_bytes)

            if number_of_bytes_written > size_of_region:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})"
                    .format(number_of_bytes_written, size_of_region))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4"
                         .format(number_of_bytes_written,
                                 hex(number_of_bytes_written),
                                 hex(spike_region_base_address)))
            spike_data = transceiver.read_memory(
                x, y, spike_region_base_address + 4, number_of_bytes_written)
            numpy_data = numpy.asarray(spike_data, dtype="uint8").view(
                dtype="uint32").byteswap().view("uint8")
            bits = numpy.fliplr(numpy.unpackbits(numpy_data).reshape(
                (-1, 32))).reshape((-1, out_spike_bytes * 8))
            times, indices = numpy.where(bits == 1)
            times = times * ms_per_tick
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
Example #19
    def _get_gsyn(
            self, region, compatible_output, has_ran, graph_mapper, placements,
            txrx, machine_time_step, runtime):
        if not has_ran:
            raise exceptions.SpynnakerException(
                "The simulation has not yet run, therefore neuron parameters "
                "cannot be retrieved")

        ms_per_tick = self._machine_time_step / 1000.0
        n_timesteps = runtime / ms_per_tick

        tempfilehandle = tempfile.NamedTemporaryFile()
        data = numpy.memmap(
            tempfilehandle.file, shape=(n_timesteps, self._n_atoms),
            dtype="float64,float64,float64,float64")
        data["f0"] = (numpy.arange(self._n_atoms * n_timesteps) %
                      self._n_atoms).reshape((n_timesteps, self._n_atoms))
        data["f1"] = numpy.repeat(numpy.arange(0, n_timesteps * ms_per_tick,
                                  ms_per_tick), self._n_atoms).reshape(
                                      (n_timesteps, self._n_atoms))

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(
            len(subvertices), "Getting recorded gsyn for {}".format(
                self._label))
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p

            # Get the App Data for the core
            app_data_base_address = txrx.\
                get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the value buffer
            neuron_param_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, region)
            neuron_param_region_base_address_buf = buffer(txrx.read_memory(
                x, y, neuron_param_region_base_address_offset, 4))
            neuron_param_region_base_address = struct.unpack_from(
                "<I", neuron_param_region_base_address_buf)[0]
            neuron_param_region_base_address += app_data_base_address

            # Read the size
            number_of_bytes_written_buf = buffer(txrx.read_memory(
                x, y, neuron_param_region_base_address, 4))

            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # Read the values
            logger.debug("Reading {} ({}) bytes starting at {}".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(neuron_param_region_base_address + 4)))

            neuron_param_region_data = txrx.read_memory(
                x, y, neuron_param_region_base_address + 4,
                number_of_bytes_written)

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            bytes_per_time_step = vertex_slice.n_atoms * 4

            number_of_time_steps_written = \
                number_of_bytes_written / bytes_per_time_step

            logger.debug("Processing {} timesteps"
                         .format(number_of_time_steps_written))

            numpy_data = (numpy.asarray(
                neuron_param_region_data, dtype="uint8").view(dtype="<i4") /
                32767.0).reshape((n_timesteps, vertex_slice.n_atoms * 2))
            data["f2"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data[:, 0::2]
            data["f3"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data[:, 1::2]
            progress_bar.update()

        progress_bar.end()
        data.shape = self._n_atoms * n_timesteps

        # Sort the data - apparently, using lexsort is faster, but it might
        # consume more memory, so the option is left open for sort-in-place
        order = numpy.lexsort((data["f1"], data["f0"]))
        # data.sort(order=['f0', 'f1'], axis=0)

        result = data.view(dtype="float64").reshape(
            (self._n_atoms * n_timesteps, 4))[order]
        return result
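
The slicing at the end of _get_gsyn de-interleaves two values per neuron per timestep (presumably the excitatory and inhibitory conductances). A small demo of just that step, with made-up numbers:

import numpy

n_atoms, n_timesteps = 2, 3
# each timestep stores two int32 values per neuron, interleaved
raw = numpy.arange(n_timesteps * n_atoms * 2, dtype="<i4")
numpy_data = (raw / 32767.0).reshape((n_timesteps, n_atoms * 2))
first = numpy_data[:, 0::2]    # columns 0, 2, ...: one value per neuron
second = numpy_data[:, 1::2]   # columns 1, 3, ...: the other value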
Example #20
    def get_neuron_parameter(
            self, region, compatible_output, has_ran, graph_mapper, placements,
            txrx, machine_time_step):
        if not has_ran:
            raise exceptions.SpynnakerException(
                "The simulation has not yet run, therefore neuron parameters "
                "cannot be retrieved")

        times = numpy.zeros(0)
        ids = numpy.zeros(0)
        values = numpy.zeros(0)
        ms_per_tick = self._machine_time_step / 1000.0

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting recorded data")
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p

            # Get the App Data for the core
            app_data_base_address = txrx.\
                get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the value buffer
            neuron_param_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, region)
            neuron_param_region_base_address_buf = str(list(txrx.read_memory(
                x, y, neuron_param_region_base_address_offset, 4))[0])
            neuron_param_region_base_address = \
                struct.unpack("<I", neuron_param_region_base_address_buf)[0]
            neuron_param_region_base_address += app_data_base_address

            # Read the size
            number_of_bytes_written_buf = \
                str(list(txrx.read_memory(
                    x, y, neuron_param_region_base_address, 4))[0])

            number_of_bytes_written = \
                struct.unpack_from("<I", number_of_bytes_written_buf)[0]

            # Read the values
            logger.debug("Reading {} ({}) bytes starting at {}".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(neuron_param_region_base_address + 4)))

            neuron_param_region_data = txrx.read_memory(
                x, y, neuron_param_region_base_address + 4,
                number_of_bytes_written)

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            n_atoms = (vertex_slice.hi_atom - vertex_slice.lo_atom) + 1

            bytes_per_time_step = n_atoms * 4

            number_of_time_steps_written = \
                number_of_bytes_written / bytes_per_time_step

            logger.debug("Processing {} timesteps"
                         .format(number_of_time_steps_written))

            data_list = bytearray()
            for data in neuron_param_region_data:
                data_list.extend(data)

            numpy_data = numpy.asarray(data_list, dtype="uint8").view(
                dtype="<i4") / 32767.0
            values = numpy.append(values, numpy_data)
            times = numpy.append(
                times, numpy.repeat(range(numpy_data.size / n_atoms),
                                    n_atoms) * ms_per_tick)
            ids = numpy.append(ids, numpy.add(
                numpy.arange(numpy_data.size) % n_atoms, vertex_slice.lo_atom))
            progress_bar.update()

        progress_bar.end()
        result = numpy.dstack((ids, times, values))[0]
        result = result[numpy.lexsort((times, ids))]
        return result
Example #21
    def _retrieve_synaptic_block(self, placements, transceiver, pre_subvertex,
                                 pre_n_atoms, post_subvertex, routing_infos,
                                 subgraph):
        """
        reads in a synaptic block from a given processor and subvertex on the
        machine.
        """
        post_placement = placements.get_placement_of_subvertex(post_subvertex)
        post_x, post_y, post_p = \
            post_placement.x, post_placement.y, post_placement.p

        # either read in the master pop table or retrieve it from storage
        master_pop_base_mem_address, app_data_base_address = \
            self._master_pop_table_generator.\
            locate_master_pop_table_base_address(
                post_x, post_y, post_p, transceiver,
                constants.POPULATION_BASED_REGIONS.POPULATION_TABLE.value)

        incoming_edges = subgraph.incoming_subedges_from_subvertex(
            post_subvertex)
        incoming_key_combo = None
        for subedge in incoming_edges:
            if subedge.pre_subvertex == pre_subvertex:
                routing_info = \
                    routing_infos.get_subedge_information_from_subedge(subedge)
                keys_and_masks = routing_info.keys_and_masks
                incoming_key_combo = keys_and_masks[0].key
                break

        maxed_row_length, synaptic_block_base_address_offset = \
            self._master_pop_table_generator.\
            extract_synaptic_matrix_data_location(
                incoming_key_combo, master_pop_base_mem_address,
                transceiver, post_x, post_y)

        block = None
        if maxed_row_length > 0:

            # calculate the synaptic block size in words
            synaptic_block_size = (
                pre_n_atoms * 4 *
                (constants.SYNAPTIC_ROW_HEADER_WORDS + maxed_row_length))

            # read in the base address of the synaptic matrix in the app region
            # table
            synapse_region_base_address_location = \
                dsg_utilities.get_region_base_address_offset(
                    app_data_base_address,
                    constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value)

            # read in the memory address of the synaptic_region base address
            synapse_region_base_address = helpful_functions.read_data(
                post_x, post_y, synapse_region_base_address_location, 4, "<I",
                transceiver)

            # the base address of the synaptic block in absolute terms is the app
            # base, plus the synaptic matrix base plus the offset
            synaptic_block_base_address = (app_data_base_address +
                                           synapse_region_base_address +
                                           synaptic_block_base_address_offset)

            # read in and return the synaptic block
            block = transceiver.read_memory(post_x, post_y,
                                            synaptic_block_base_address,
                                            synaptic_block_size)

            if len(block) != synaptic_block_size:
                raise exceptions.SynapticBlockReadException(
                    "Not enough data has been read"
                    " (aka, something funkky happened)")
        return block, maxed_row_length
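
The size computation above allocates one padded row per pre-synaptic atom, each SYNAPTIC_ROW_HEADER_WORDS + maxed_row_length words long. A worked instance with assumed values (the real SYNAPTIC_ROW_HEADER_WORDS comes from the sPyNNaker constants module):

SYNAPTIC_ROW_HEADER_WORDS = 3   # assumed value, for illustration only
pre_n_atoms = 10
maxed_row_length = 4

# one row per pre-atom, 4 bytes per word
synaptic_block_size = (
    pre_n_atoms * 4 * (SYNAPTIC_ROW_HEADER_WORDS + maxed_row_length))
print(synaptic_block_size)      # 280 bytes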
Example #22
    def _get_spikes(self, graph_mapper, placements, transceiver,
                    compatible_output, spike_recording_region,
                    sub_vertex_out_spike_bytes_function):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells. This is read directly from the memory of the board.
        """

        logger.info("Getting spikes for {}".format(self._label))

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting spikes")
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the App Data for the core
            app_data_base_address = \
                transceiver.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, spike_recording_region)
            spike_region_base_address_buf = transceiver.read_memory(
                x, y, spike_region_base_address_offset, 4)
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = transceiver.read_memory(
                x, y, spike_region_base_address, 4)
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region we allocated for spikes
            out_spike_bytes = sub_vertex_out_spike_bytes_function(
                subvertex, subvertex_slice)
            size_of_region = self.get_recording_region_size(out_spike_bytes)

            if number_of_bytes_written > size_of_region:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})".format(number_of_bytes_written,
                                                   size_of_region))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(spike_region_base_address)))
            spike_data = transceiver.read_memory(x, y,
                                                 spike_region_base_address + 4,
                                                 number_of_bytes_written)
            numpy_data = numpy.asarray(
                spike_data,
                dtype="uint8").view(dtype="uint32").byteswap().view("uint8")
            bits = numpy.fliplr(
                numpy.unpackbits(numpy_data).reshape((-1, 32))).reshape(
                    (-1, out_spike_bytes * 8))
            times, indices = numpy.where(bits == 1)
            times = times * ms_per_tick
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
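
The numpy juggling above turns the recorded bitfields into (neuron id, timestep) pairs: each timestep contributes out_spike_bytes of little-endian words, and bit n of a word marks a spike from neuron lo_atom + n. A runnable demo of the same transform on two fabricated timesteps:

import numpy

out_spike_bytes = 4                        # one 32-bit word per timestep
# timestep 0: neuron 0 fires; timestep 1: neuron 2 fires
spike_data = numpy.array([1 << 0, 1 << 2], dtype="<u4").tobytes()

numpy_data = numpy.frombuffer(spike_data, dtype="uint8").view(
    dtype="uint32").byteswap().view("uint8")
bits = numpy.fliplr(numpy.unpackbits(numpy_data).reshape(
    (-1, 32))).reshape((-1, out_spike_bytes * 8))
times, indices = numpy.where(bits == 1)
print(list(zip(indices, times)))           # neuron 0 at t 0, neuron 2 at t 1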
Example #23
    def get_neuron_parameter(self, region, compatible_output, has_ran,
                             graph_mapper, placements, txrx, machine_time_step,
                             runtime):
        if not has_ran:
            raise exceptions.SpynnakerException(
                "The simulation has not yet run, therefore neuron parameters "
                "cannot be retrieved")

        ms_per_tick = self._machine_time_step / 1000.0
        n_timesteps = runtime / ms_per_tick

        tempfilehandle = tempfile.NamedTemporaryFile()
        data = numpy.memmap(tempfilehandle.file,
                            shape=(n_timesteps, self._n_atoms),
                            dtype="float64,float64,float64")
        data["f0"] = (numpy.arange(self._n_atoms * n_timesteps) %
                      self._n_atoms).reshape((n_timesteps, self._n_atoms))
        data["f1"] = numpy.repeat(
            numpy.arange(0, n_timesteps * ms_per_tick, ms_per_tick),
            self._n_atoms).reshape((n_timesteps, self._n_atoms))

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting recorded data")
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p

            # Get the App Data for the core
            app_data_base_address = txrx.\
                get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the value buffer
            neuron_param_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, region)
            neuron_param_region_base_address_buf = txrx.read_memory(
                x, y, neuron_param_region_base_address_offset, 4)
            neuron_param_region_base_address = struct.unpack_from(
                "<I", neuron_param_region_base_address_buf)[0]
            neuron_param_region_base_address += app_data_base_address

            # Read the size
            number_of_bytes_written_buf = txrx.read_memory(
                x, y, neuron_param_region_base_address, 4)

            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # Read the values
            logger.debug("Reading {} ({}) bytes starting at {}".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(neuron_param_region_base_address + 4)))

            neuron_param_region_data = txrx.read_memory(
                x, y, neuron_param_region_base_address + 4,
                number_of_bytes_written)

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            bytes_per_time_step = vertex_slice.n_atoms * 4

            number_of_time_steps_written = \
                number_of_bytes_written / bytes_per_time_step

            logger.debug(
                "Processing {} timesteps".format(number_of_time_steps_written))

            numpy_data = (numpy.asarray(neuron_param_region_data,
                                        dtype="uint8").view(dtype="<i4") /
                          32767.0).reshape((n_timesteps, vertex_slice.n_atoms))
            data["f2"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data
            progress_bar.update()

        progress_bar.end()
        data.shape = self._n_atoms * n_timesteps

        # Sort the data - apparently, using lexsort is faster, but it might
        # consume more memory, so the option is left open for sort-in-place
        order = numpy.lexsort((data["f1"], data["f0"]))
        # data.sort(order=['f0', 'f1'], axis=0)

        result = data.view(dtype="float64").reshape(
            (self._n_atoms * n_timesteps, 3))[order]
        return result
Example #24
    def test_get_region_base_address_offset(self):
        val = get_region_base_address_offset(48, 7)
        self.assertEqual(val, 84)
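
The test above pins down the arithmetic: with a base of 48 and region 7 the entry sits at 84, consistent with an 8-byte pointer-table header followed by one 4-byte word per region. A sketch of the function under that assumption (the real constant lives in the data_specification package):

APP_PTR_TABLE_HEADER_BYTE_SIZE = 8   # assumed from 48 + 8 + 7 * 4 == 84

def get_region_base_address_offset(app_data_base_address, region):
    # skip the table header, then index the region's 4-byte entry
    return (app_data_base_address +
            APP_PTR_TABLE_HEADER_BYTE_SIZE + region * 4)

assert get_region_base_address_offset(48, 7) == 84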
Example #25
    def get_data_for_vertex(self, x, y, p, region_to_read, state_region):
        """ Get a pointer to the data container for all the data retrieved\
            during the simulation from a specific region area of a core

        :param x: x coordinate of the chip
        :type x: int
        :param y: y coordinate of the chip
        :type y: int
        :param p: processor on the specified chip
        :type p: int
        :param region_to_read: desired data region
        :type region_to_read: int
        :param state_region: final state storage region
        :type state_region: int
        :return: pointer to a class which inherits from\
                AbstractBufferedDataStorage
        :rtype:\
                py:class:`spinn_front_end_common.interface.buffer_management.buffer_models.abstract_buffered_data_storage.AbstractBufferedDataStorage`
        """
        # flush data here
        if not self._received_data.is_data_from_region_flushed(
                x, y, p, region_to_read):
            if not self._received_data.is_end_buffering_state_recovered(
                    x, y, p):

                # Get the App Data for the core
                app_data_base_address = \
                    self._transceiver.get_cpu_information_from_core(
                        x, y, p).user[0]

                # Get the position of the buffer
                state_region_base_offset_address = \
                    dsg_utilities.get_region_base_address_offset(
                        app_data_base_address, state_region)
                state_region_base_address_buf = buffer(
                    self._transceiver.read_memory(
                        x, y, state_region_base_offset_address, 4))
                state_region_base_address = struct.unpack_from(
                    "<I", state_region_base_address_buf)[0]
                state_region_base_address += app_data_base_address

                # retrieve channel state memory area
                raw_number_of_channels = self._transceiver.read_memory(
                    x, y, state_region_base_address, 4)
                number_of_channels = struct.unpack(
                    "<I", str(raw_number_of_channels))[0]
                channel_state_data = str(self._transceiver.read_memory(
                    x, y, state_region_base_address,
                    EndBufferingState.size_of_region(number_of_channels)))
                end_buffering_state = EndBufferingState.create_from_bytearray(
                    channel_state_data)
                self._received_data.store_end_buffering_state(
                    x, y, p, end_buffering_state)
            else:
                end_buffering_state = self._received_data.\
                    get_end_buffering_state(x, y, p)

            end_state = end_buffering_state.get_state_for_region(
                region_to_read)
            start_ptr = end_state.start_address
            write_ptr = end_state.current_write
            end_ptr = end_state.end_address
            read_ptr = end_state.current_read

            # current read needs to be adjusted in case the last portion of the
            # memory has already been read, but the HostDataRead packet has not
            # been processed by the chip before simulation finished
            # This situation is identified by the sequence number of the last
            # packet sent to this core and the core internal state of the
            # output buffering finite state machine
            seq_no_last_ack_packet = \
                self._received_data.last_sequence_no_for_core(x, y, p)
            seq_no_internal_fsm = end_buffering_state.buffering_out_fsm_state
            if seq_no_internal_fsm == seq_no_last_ack_packet:

                # if the last ack packet has not been processed on the chip,
                # process it now
                last_sent_ack_sdp_packet = \
                    self._received_data.last_sent_packet_to_core(x, y, p)
                last_sent_ack_packet = create_eieio_command.\
                    read_eieio_command_message(
                        last_sent_ack_sdp_packet.data, 0)
                if not isinstance(last_sent_ack_packet, HostDataRead):
                    raise Exception(
                        "Something somewhere went terribly wrong - "
                        "I was looking for a HostDataRead packet, "
                        "while I got {0:s}".format(last_sent_ack_packet))
                for i in xrange(last_sent_ack_packet.n_requests):
                    if (region_to_read == last_sent_ack_packet.region_id(i) and
                            not end_state.is_state_updated):
                        read_ptr += last_sent_ack_packet.space_read(i)
                        if (read_ptr == write_ptr or
                                (read_ptr == end_ptr and
                                 write_ptr == start_ptr)):
                            end_state.update_last_operation(
                                spinn_front_end_constants.BUFFERING_OPERATIONS.
                                BUFFER_READ.value)
                        if read_ptr == end_ptr:
                            read_ptr = start_ptr
                        elif read_ptr > end_ptr:
                            raise Exception(
                                "Something somewhere went terribly wrong - "
                                "I was reading unknown data from beyond the "
                                "region area")
                end_state.update_read_pointer(read_ptr)
                end_state.set_update_completed()

            # now state is updated, read back values for read pointer and
            # last operation performed
            last_operation = end_state.last_buffer_operation
            read_ptr = end_state.current_read

            # now read_ptr is updated, check memory to read
            if read_ptr < write_ptr:
                length = write_ptr - read_ptr
                data = self._transceiver.read_memory(x, y, read_ptr, length)
                self._received_data.flushing_data_from_region(
                    x, y, p, region_to_read, data)

            elif read_ptr > write_ptr:
                length = end_ptr - read_ptr
                data = self._transceiver.read_memory(x, y, read_ptr, length)
                self._received_data.store_data_in_region_buffer(
                    x, y, p, region_to_read, data)
                read_ptr = start_ptr
                length = write_ptr - read_ptr
                data = self._transceiver.read_memory(x, y, read_ptr, length)
                self._received_data.flushing_data_from_region(
                    x, y, p, region_to_read, data)

            elif (read_ptr == write_ptr and
                    last_operation == spinn_front_end_constants.
                    BUFFERING_OPERATIONS.BUFFER_WRITE.value):
                length = end_ptr - read_ptr
                data = self._transceiver.read_memory(x, y, read_ptr, length)
                self._received_data.store_data_in_region_buffer(
                    x, y, p, region_to_read, data)
                read_ptr = start_ptr
                length = write_ptr - read_ptr
                data = self._transceiver.read_memory(x, y, read_ptr, length)
                self._received_data.flushing_data_from_region(
                    x, y, p, region_to_read, data)

            elif (read_ptr == write_ptr and
                    last_operation == spinn_front_end_constants.
                    BUFFERING_OPERATIONS.BUFFER_READ.value):
                data = bytearray()
                self._received_data.flushing_data_from_region(
                    x, y, p, region_to_read, data)

        # data flush has been completed - return appropriate data
        # the two returns can be exchanged - one returns data and the other
        # returns a pointer to the structure holding the data
        return self._received_data.get_region_data_pointer(
            x, y, p, region_to_read)
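
The three read branches at the end of get_data_for_vertex amount to draining a circular buffer. Condensed into a sketch over a plain bytes object (illustrative only; the real code reads each stretch from SDRAM and accumulates it per region):

def drain_circular_buffer(mem, start, end, read_ptr, write_ptr,
                          last_op_was_write):
    if read_ptr < write_ptr:
        # contiguous stretch between the two pointers
        return mem[read_ptr:write_ptr]
    if read_ptr > write_ptr or last_op_was_write:
        # wrapped: read the tail, then the head up to the write pointer
        return mem[read_ptr:end] + mem[start:write_ptr]
    # read == write after a read: the buffer was already drained
    return b""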
Example #26
    def get_spikes(
            self, txrx, placements, graph_mapper, compatible_output=False):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells.   This is read directly from the memory for the board.

        :param transceiver:
        :param placements:
        :param graph_mapper:
        :param compatible_output:
        """

        logger.info("Getting spikes for {}".format(self._label))

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting spikes")
        results = list()
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the App Data for the core
            app_data_base_address = \
                txrx.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address,
                    self._SPIKE_SOURCE_REGIONS
                    .SPIKE_DATA_RECORDED_REGION.value)
            spike_region_base_address_buf = buffer(txrx.read_memory(
                x, y, spike_region_base_address_offset, 4))
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = buffer(txrx.read_memory(
                x, y, spike_region_base_address, 4))
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region we allocated for spikes
            send_buffer = self._get_spike_send_buffer(subvertex_slice)
            if number_of_bytes_written > send_buffer.total_region_size:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})"
                    .format(number_of_bytes_written,
                            send_buffer.total_region_size))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4"
                         .format(number_of_bytes_written,
                                 hex(number_of_bytes_written),
                                 hex(spike_region_base_address)))
            spike_data_block = txrx.read_memory(
                x, y, spike_region_base_address + 4, number_of_bytes_written)

            # translate block of spikes into EIEIO messages
            offset = 0
            while offset <= number_of_bytes_written - 4:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data_block, offset)
                offset += eieio_header.size
                timestamps = numpy.repeat([eieio_header.payload_base],
                                          eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data_block, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - subvertex.base_key) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])

            # complete the buffer
            progress_bar.update()
        progress_bar.end()

        result = numpy.vstack(results)
        result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        return result
Example #27
    def _retrieve_synaptic_block(
            self, placements, transceiver, pre_subvertex, pre_n_atoms,
            post_subvertex, routing_infos, subgraph):
        """
        reads in a synaptic block from a given processor and subvertex on the
        machine.
        """
        post_placement = placements.get_placement_of_subvertex(post_subvertex)
        post_x, post_y, post_p = \
            post_placement.x, post_placement.y, post_placement.p

        # either read in the master pop table or retrieve it from storage
        master_pop_base_mem_address, app_data_base_address = \
            self._population_table_type.locate_master_pop_table_base_address(
                post_x, post_y, post_p, transceiver,
                constants.POPULATION_BASED_REGIONS.POPULATION_TABLE.value)

        incoming_edges = subgraph.incoming_subedges_from_subvertex(
            post_subvertex)
        incoming_key_combo = None
        for subedge in incoming_edges:
            if subedge.pre_subvertex == pre_subvertex:
                routing_info = \
                    routing_infos.get_subedge_information_from_subedge(subedge)
                keys_and_masks = routing_info.keys_and_masks
                incoming_key_combo = keys_and_masks[0].key
                break

        maxed_row_length, synaptic_block_base_address_offset = \
            self._population_table_type.extract_synaptic_matrix_data_location(
                incoming_key_combo, master_pop_base_mem_address,
                transceiver, post_x, post_y)

        block = None
        if maxed_row_length > 0:

            # calculate the synaptic block size in words
            synaptic_block_size = (pre_n_atoms * 4 *
                                   (constants.SYNAPTIC_ROW_HEADER_WORDS +
                                    maxed_row_length))

            # read in the base address of the synaptic matrix in the app region
            # table
            synapse_region_base_address_location = \
                dsg_utilities.get_region_base_address_offset(
                    app_data_base_address,
                    constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value)

            # read in the memory address of the synaptic_region base address
            synapse_region_base_address = helpful_functions.read_data(
                post_x, post_y, synapse_region_base_address_location, 4,
                "<I", transceiver)

            # the base address of the synaptic block in absolute terms is the
            # app base, plus the synaptic matrix base plus the offset
            synaptic_block_base_address = (app_data_base_address +
                                           synapse_region_base_address +
                                           synaptic_block_base_address_offset)

            # read in and return the synaptic block
            block = transceiver.read_memory(
                post_x, post_y, synaptic_block_base_address,
                synaptic_block_size)

            if len(block) != synaptic_block_size:
                raise exceptions.SynapticBlockReadException(
                    "Not enough data has been read"
                    " (aka, something funkky happened)")
        return block, maxed_row_length