def _execute(txrx, machine, app_id, x, y, p, data_spec_path):
        # pylint: disable=too-many-arguments, too-many-locals

        # build specification reader
        reader = FileDataReader(data_spec_path)

        # Maximum available memory.
        # However, system updates the memory available independently, so the
        # space available check actually happens when memory is allocated.

        # generate data spec executor
        executor = DataSpecificationExecutor(
            reader,
            machine.get_chip_at(x, y).sdram.size)

        # run data spec executor
        try:
            executor.execute()
        except DataSpecificationException:
            logger.error("Error executing data specification for {}, {}, {}",
                         x, y, p)
            raise

        bytes_used_by_spec = executor.get_constructed_data_size()

        # allocate memory where the app data is going to be written; this
        # raises an exception in case there is not enough SDRAM to allocate
        start_address = txrx.malloc_sdram(x, y, bytes_used_by_spec, app_id)

        # Write the header and pointer table and load it
        header = executor.get_header()
        pointer_table = executor.get_pointer_table(start_address)
        data_to_write = numpy.concatenate((header, pointer_table)).tostring()
        txrx.write_memory(x, y, start_address, data_to_write)
        bytes_written_by_spec = len(data_to_write)

        # Write each region
        for region_id in _MEM_REGIONS:
            region = executor.get_region(region_id)
            if region is not None:
                max_pointer = region.max_write_pointer
                if not region.unfilled and max_pointer > 0:
                    # Get the data up to what has been written
                    data = region.region_data[:max_pointer]

                    # Write the data to the position
                    txrx.write_memory(x, y, pointer_table[region_id], data)
                    bytes_written_by_spec += len(data)

        # set the user-0 register to point at the application data
        write_address_to_user0(txrx, x, y, p, start_address)
        return {
            'start_address': start_address,
            'memory_used': bytes_used_by_spec,
            'memory_written': bytes_written_by_spec
        }
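# A minimal driver sketch for _execute above, added for illustration; it is
# not from the original source. It assumes dsg_targets maps (x, y, p) core
# coordinates to data specification file paths, as in the __call__ example
# later in this collection, and the helper name is hypothetical.
def _run_all_specs(txrx, machine, app_id, dsg_targets):
    # Execute every data specification and collect the memory-map entries
    results = dict()
    for (x, y, p), data_spec_path in dsg_targets.items():
        results[x, y, p] = _execute(
            txrx, machine, app_id, x, y, p, data_spec_path)
    return results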
    def test_trivial_spec(self):
        temp_spec = mktemp()
        spec = DataSpecificationGenerator(io.FileIO(temp_spec, "w"))
        spec.end_specification()

        executor = DataSpecificationExecutor(io.FileIO(temp_spec, "r"), 400)
        executor.execute()
        for r in range(constants.MAX_MEM_REGIONS):
            self.assertIsNone(executor.get_region(r))
    def __python_execute(self, core, reader, writer_func, base_address,
                         size_allocated):
        """
        :param tuple(int,int,int) core:
        :param ~.AbstractDataReader reader:
        :param callable(tuple(int,int,int,bytearray),None) writer_func:
        :param int base_address:
        :param int size_allocated:
        :rtype: DataWritten
        """
        x, y, p = core

        # Maximum available memory.
        # However, system updates the memory available independently, so the
        # space available check actually happens when memory is allocated.
        memory_available = self._machine.get_chip_at(x, y).sdram.size

        # generate data spec executor
        executor = DataSpecificationExecutor(reader, memory_available)

        # run data spec executor
        try:
            executor.execute()
        except DataSpecificationException:
            logger.error("Error executing data specification for {}, {}, {}",
                         x, y, p)
            raise

        # Do the actual writing ------------------------------------

        # Write the header and pointer table
        header = executor.get_header()
        pointer_table = executor.get_pointer_table(base_address)
        data_to_write = numpy.concatenate((header, pointer_table)).tostring()

        # NB: DSE meta-block is always small (i.e., one SDP write)
        self._txrx.write_memory(x, y, base_address, data_to_write)
        bytes_written = len(data_to_write)

        # Write each region
        for region_id in _MEM_REGIONS:
            region = executor.get_region(region_id)
            if region is None:
                continue
            max_pointer = region.max_write_pointer
            if region.unfilled or max_pointer == 0:
                continue

            # Get the data up to what has been written
            data = region.region_data[:max_pointer]

            # Write the data to the position
            writer_func(x, y, pointer_table[region_id], data)
            bytes_written += len(data)

        return DataWritten(base_address, size_allocated, bytes_written)
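    # A sketch of a writer_func that __python_execute could be handed; this
    # helper is an illustration, not part of the original class. It simply
    # forwards each region write, matching how writer_func is invoked above
    # (writer_func(x, y, address, data)), to a transceiver that offers the
    # write_memory(x, y, address, data) method used elsewhere in this file.
    @staticmethod
    def __make_direct_writer(txrx):
        def writer_func(x, y, address, data):
            # Write the region data straight into SDRAM on the target chip
            txrx.write_memory(x, y, address, data)
        return writer_func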
    def __execute(self, core, reader, writer_func):
        x, y, p = core

        # Maximum available memory.
        # However, system updates the memory available independently, so the
        # space available check actually happens when memory is allocated.
        memory_available = self._machine.get_chip_at(x, y).sdram.size

        # generate data spec executor
        executor = DataSpecificationExecutor(reader, memory_available)

        # run data spec executor
        try:
            executor.execute()
        except DataSpecificationException:
            logger.error("Error executing data specification for {}, {}, {}",
                         x, y, p)
            raise

        bytes_allocated = executor.get_constructed_data_size()

        # allocate memory where the app data is going to be written; this
        # raises an exception in case there is not enough SDRAM to allocate
        start_address = self._txrx.malloc_sdram(x, y, bytes_allocated,
                                                self._app_id)

        # Do the actual writing ------------------------------------

        # Write the header and pointer table
        header = executor.get_header()
        pointer_table = executor.get_pointer_table(start_address)
        data_to_write = numpy.concatenate((header, pointer_table)).tostring()
        # NB: DSE meta-block is always small (i.e., one SDP write)
        self._txrx.write_memory(x, y, start_address, data_to_write)
        bytes_written = len(data_to_write)

        # Write each region
        for region_id in _MEM_REGIONS:
            region = executor.get_region(region_id)
            if region is None:
                continue
            max_pointer = region.max_write_pointer
            if region.unfilled or max_pointer == 0:
                continue

            # Get the data up to what has been written
            data = region.region_data[:max_pointer]

            # Write the data to the position
            writer_func(x, y, pointer_table[region_id], data)
            bytes_written += len(data)

        # set the user-0 register to point at the application data
        write_address_to_user0(self._txrx, x, y, p, start_address)

        return DataWritten(start_address, bytes_allocated, bytes_written)
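    # Note on the two variants above: __execute allocates the SDRAM block
    # itself (via malloc_sdram) and points the user-0 register at it, whereas
    # __python_execute is handed a pre-allocated base_address and size and
    # delegates the actual region writes to the supplied writer_func.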
    def test_complex_spec(self):
        # Create an SDRAM just to set the max chip size
        SDRAM(1000)
        temp_spec = mktemp()
        spec = DataSpecificationGenerator(io.FileIO(temp_spec, "w"))
        spec.reserve_memory_region(0, 44)
        spec.switch_write_focus(0)
        spec.set_register_value(3, 0x31323341)
        spec.write_value_from_register(3)
        spec.set_register_value(3, 0x31323342)
        spec.write_value_from_register(3)
        spec.set_register_value(3, 0x31323344)
        spec.write_value_from_register(3)
        spec.set_register_value(3, 0x31323347)
        spec.write_value_from_register(3)
        spec.set_register_value(3, 0x3132334B)
        spec.write_value_from_register(3)
        spec.set_register_value(2, 24)
        spec.set_write_pointer(2, address_is_register=True)
        spec.write_array([0x61, 0x62, 0x63, 0x64], data_type=DataType.UINT8)
        spec.set_register_value(5, 4)
        spec.write_repeated_value(0x70,
                                  5,
                                  repeats_is_register=True,
                                  data_type=DataType.UINT8)
        spec.write_value(0x7d, data_type=DataType.INT64)
        spec.end_specification()

        executor = DataSpecificationExecutor(io.FileIO(temp_spec, "r"), 400)
        executor.execute()
        r = executor.get_region(0)
        self.assertEqual(r.allocated_size, 44)
        self.assertEqual(r.max_write_pointer, 40)
        self.assertFalse(r.unfilled)
        self.assertEqual(
            r.region_data,
            bytearray("A321"
                      "B321"
                      "D321"
                      "G321"
                      "K321"
                      "\0\0\0\0"
                      "abcd"
                      "pppp"
                      "}\0\0\0"
                      "\0\0\0\0"
                      "\0\0\0\0".encode("ISO 8859-1")))
Example #6
    def test_write_synaptic_matrix_and_master_population_table(self):
        MockSimulator.setup()

        default_config_paths = os.path.join(
            os.path.dirname(abstract_spinnaker_common.__file__),
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME)

        config = conf_loader.load_config(
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)
        config.set("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)

        machine_time_step = 1000.0

        pre_app_vertex = SimpleApplicationVertex(10)
        pre_vertex = SimpleMachineVertex(resources=None)
        pre_vertex_slice = Slice(0, 9)
        post_app_vertex = SimpleApplicationVertex(10)
        post_vertex = SimpleMachineVertex(resources=None)
        post_vertex_slice = Slice(0, 9)
        post_slice_index = 0
        one_to_one_connector_1 = OneToOneConnector(None)
        one_to_one_connector_1.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        one_to_one_connector_1.set_weights_and_delays(1.5, 1.0)
        one_to_one_connector_2 = OneToOneConnector(None)
        one_to_one_connector_2.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        one_to_one_connector_2.set_weights_and_delays(2.5, 2.0)
        all_to_all_connector = AllToAllConnector(None)
        all_to_all_connector.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        all_to_all_connector.set_weights_and_delays(4.5, 4.0)
        direct_synapse_information_1 = SynapseInformation(
            one_to_one_connector_1, SynapseDynamicsStatic(), 0)
        direct_synapse_information_2 = SynapseInformation(
            one_to_one_connector_2, SynapseDynamicsStatic(), 1)
        all_to_all_synapse_information = SynapseInformation(
            all_to_all_connector, SynapseDynamicsStatic(), 0)
        app_edge = ProjectionApplicationEdge(pre_app_vertex, post_app_vertex,
                                             direct_synapse_information_1)
        app_edge.add_synapse_information(direct_synapse_information_2)
        app_edge.add_synapse_information(all_to_all_synapse_information)
        machine_edge = ProjectionMachineEdge(app_edge.synapse_information,
                                             pre_vertex, post_vertex)
        partition_name = "TestPartition"

        graph = MachineGraph("Test")
        graph.add_vertex(pre_vertex)
        graph.add_vertex(post_vertex)
        graph.add_edge(machine_edge, partition_name)

        graph_mapper = GraphMapper()
        graph_mapper.add_vertex_mapping(pre_vertex, pre_vertex_slice,
                                        pre_app_vertex)
        graph_mapper.add_vertex_mapping(post_vertex, post_vertex_slice,
                                        post_app_vertex)
        graph_mapper.add_edge_mapping(machine_edge, app_edge)

        weight_scales = [4096.0, 4096.0]

        key = 0
        routing_info = RoutingInfo()
        routing_info.add_partition_info(
            PartitionRoutingInfo(
                [BaseKeyAndMask(key, 0xFFFFFFF0)],
                graph.get_outgoing_edge_partition_starting_at_vertex(
                    pre_vertex, partition_name)))

        temp_spec = tempfile.mktemp()
        spec_writer = FileDataWriter(temp_spec)
        spec = DataSpecificationGenerator(spec_writer, None)
        master_pop_sz = 1000
        master_pop_region = 0
        all_syn_block_sz = 2000
        synapse_region = 1
        spec.reserve_memory_region(master_pop_region, master_pop_sz)
        spec.reserve_memory_region(synapse_region, all_syn_block_sz)

        synapse_type = MockSynapseType()

        synaptic_manager = SynapticManager(synapse_type=synapse_type,
                                           ring_buffer_sigma=5.0,
                                           spikes_per_second=100.0,
                                           config=config)
        synaptic_manager._write_synaptic_matrix_and_master_population_table(
            spec, [post_vertex_slice], post_slice_index, post_vertex,
            post_vertex_slice, all_syn_block_sz, weight_scales,
            master_pop_region, synapse_region, routing_info, graph_mapper,
            graph, machine_time_step)
        spec.end_specification()
        spec_writer.close()

        spec_reader = FileDataReader(temp_spec)
        executor = DataSpecificationExecutor(spec_reader,
                                             master_pop_sz + all_syn_block_sz)
        executor.execute()

        master_pop_table = executor.get_region(0)
        synaptic_matrix = executor.get_region(1)

        all_data = bytearray()
        all_data.extend(
            master_pop_table.region_data[:master_pop_table.max_write_pointer])
        all_data.extend(
            synaptic_matrix.region_data[:synaptic_matrix.max_write_pointer])
        master_pop_table_address = 0
        synaptic_matrix_address = master_pop_table.max_write_pointer
        direct_synapses_address = struct.unpack_from(
            "<I", synaptic_matrix.region_data)[0]
        direct_synapses_address += synaptic_matrix_address + 8
        indirect_synapses_address = synaptic_matrix_address + 4
        placement = Placement(None, 0, 0, 1)
        transceiver = MockTransceiverRawData(all_data)

        # Get the master population table details
        items = synaptic_manager._poptable_type\
            .extract_synaptic_matrix_data_location(
                key, master_pop_table_address, transceiver,
                placement.x, placement.y)

        # The first entry should be direct, but the rest should be indirect;
        # the second is potentially direct, but is excluded by the limit on
        # the size of the direct matrix
        assert len(items) == 3

        # TODO: This has been changed because direct matrices are disabled!
        assert not items[0][2]
        assert not items[1][2]
        assert not items[2][2]

        data_1, row_len_1 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver,
            placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address,
            key=key,
            n_rows=pre_vertex_slice.n_atoms,
            index=0,
            using_extra_monitor_cores=False)
        connections_1 = synaptic_manager._synapse_io.read_synapses(
            direct_synapse_information_1, pre_vertex_slice, post_vertex_slice,
            row_len_1, 0, 2, weight_scales, data_1, None,
            app_edge.n_delay_stages, machine_time_step)

        # The first matrix is a 1-1 matrix, so row length is 1
        assert row_len_1 == 1

        # Check that all the connections have the right weight and delay
        assert len(connections_1) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 1.5 for conn in connections_1])
        assert all([conn["delay"] == 1.0 for conn in connections_1])

        data_2, row_len_2 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver,
            placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address,
            key=key,
            n_rows=pre_vertex_slice.n_atoms,
            index=1,
            using_extra_monitor_cores=False)
        connections_2 = synaptic_manager._synapse_io.read_synapses(
            direct_synapse_information_2, pre_vertex_slice, post_vertex_slice,
            row_len_2, 0, 2, weight_scales, data_2, None,
            app_edge.n_delay_stages, machine_time_step)

        # The second matrix is a 1-1 matrix, so row length is 1
        assert row_len_2 == 1

        # Check that all the connections have the right weight and delay
        assert len(connections_2) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 2.5 for conn in connections_2])
        assert all([conn["delay"] == 2.0 for conn in connections_2])

        data_3, row_len_3 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver,
            placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address,
            key=key,
            n_rows=pre_vertex_slice.n_atoms,
            index=2,
            using_extra_monitor_cores=False)
        connections_3 = synaptic_manager._synapse_io.read_synapses(
            all_to_all_synapse_information, pre_vertex_slice,
            post_vertex_slice, row_len_3, 0, 2, weight_scales, data_3, None,
            app_edge.n_delay_stages, machine_time_step)

        # The third matrix is an all-to-all matrix, so length is n_atoms
        assert row_len_3 == post_vertex_slice.n_atoms

        # Check that all the connections have the right weight and delay
        assert len(connections_3) == \
            post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms
        assert all([conn["weight"] == 4.5 for conn in connections_3])
        assert all([conn["delay"] == 4.0 for conn in connections_3])
Example #7
    def __call__(self, transceiver, machine, app_id, dsg_targets):
        """

        :param machine: the python representation of the spinnaker machine
        :param transceiver: the spinnman instance
        :param app_id: the application ID of the simulation
        :param dsg_targets: map of placement to file path

        :return: map of placement and dsg data, and loaded data flag.
        """
        processor_to_app_data_base_address = dict()

        # create a progress bar for end users
        progress = ProgressBar(
            dsg_targets, "Executing data specifications and loading data")

        for ((x, y, p),
             data_spec_file_path) in progress.over(dsg_targets.iteritems()):

            # build specification reader
            data_spec_reader = FileDataReader(data_spec_file_path)

            # Maximum available memory.
            # However, system updates the memory available independently, so
            # the space available check actually happens when memory is
            # allocated.
            chip = machine.get_chip_at(x, y)
            memory_available = chip.sdram.size

            # generate data spec executor
            executor = DataSpecificationExecutor(data_spec_reader,
                                                 memory_available)

            # run data spec executor
            try:
                executor.execute()
            except DataSpecificationException:
                logger.error(
                    "Error executing data specification for {}, {}, {}".format(
                        x, y, p))
                raise

            bytes_used_by_spec = executor.get_constructed_data_size()

            # allocate memory where the app data is going to be written
            # this raises an exception in case there is not enough
            # SDRAM to allocate
            start_address = transceiver.malloc_sdram(x, y, bytes_used_by_spec,
                                                     app_id)

            # Write the header and pointer table and load it
            header = executor.get_header()
            pointer_table = executor.get_pointer_table(start_address)
            data_to_write = numpy.concatenate(
                (header, pointer_table)).tostring()
            transceiver.write_memory(x, y, start_address, data_to_write)
            bytes_written_by_spec = len(data_to_write)

            # Write each region
            for region_id in range(constants.MAX_MEM_REGIONS):
                region = executor.get_region(region_id)
                if region is not None:

                    max_pointer = region.max_write_pointer
                    if not region.unfilled and max_pointer > 0:

                        # Get the data up to what has been written
                        data = region.region_data[:max_pointer]

                        # Write the data to the position
                        position = pointer_table[region_id]
                        transceiver.write_memory(x, y, position, data)
                        bytes_written_by_spec += len(data)

            # set the user-0 register to point at the application data
            user_0_address = \
                transceiver.get_user_0_register_address_from_core(x, y, p)
            start_address_encoded = \
                buffer(struct.pack("<I", start_address))
            transceiver.write_memory(x, y, user_0_address,
                                     start_address_encoded)

            # write information for the memory map report
            processor_to_app_data_base_address[x, y, p] = {
                'start_address': start_address,
                'memory_used': bytes_used_by_spec,
                'memory_written': bytes_written_by_spec
            }

        return processor_to_app_data_base_address, True
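    # Note: the __call__ example above uses Python 2 constructs
    # (dict.iteritems() and buffer()). A Python 3 sketch of the same user-0
    # write, with a hypothetical helper name, would hand plain bytes to the
    # transceiver instead:
    @staticmethod
    def __write_user0_py3(transceiver, x, y, p, start_address):
        # Point the core's user-0 register at the loaded application data
        user_0_address = \
            transceiver.get_user_0_register_address_from_core(x, y, p)
        transceiver.write_memory(
            x, y, user_0_address, struct.pack("<I", start_address))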
    def test_simple_spec(self):

        # Create an SDRAM just to set the max chip size
        SDRAM(1000)
        # Write a data spec to execute
        temp_spec = mktemp()
        spec_writer = io.FileIO(temp_spec, "w")
        spec = DataSpecificationGenerator(spec_writer)
        spec.reserve_memory_region(0, 100)
        spec.reserve_memory_region(1, 200, empty=True)
        spec.reserve_memory_region(2, 4)
        spec.reserve_memory_region(3, 12, reference=1)
        spec.reference_memory_region(4, 2)
        spec.switch_write_focus(0)
        spec.write_array([0, 1, 2])
        spec.set_write_pointer(20)
        spec.write_value(4)
        spec.switch_write_focus(2)
        spec.write_value(3)
        spec.set_write_pointer(0)
        spec.write_value(10)
        spec.end_specification()

        # Execute the spec
        spec_reader = io.FileIO(temp_spec, "r")
        executor = DataSpecificationExecutor(spec_reader, 400)
        executor.execute()

        # Test the size
        header_and_table_size = (constants.MAX_MEM_REGIONS + 2) * 4
        self.assertEqual(executor.get_constructed_data_size(),
                         header_and_table_size + 100 + 200 + 4 + 12)

        # Test the unused regions
        for region in range(5, constants.MAX_MEM_REGIONS):
            self.assertIsNone(executor.get_region(region))

        # Test region 0
        region_0 = executor.get_region(0)
        self.assertEqual(region_0.allocated_size, 100)
        self.assertEqual(region_0.max_write_pointer, 24)
        self.assertFalse(region_0.unfilled)
        self.assertIsNone(region_0.reference)
        self.assertEqual(region_0.region_data[:region_0.max_write_pointer],
                         struct.pack("<IIIIII", 0, 1, 2, 0, 0, 4))

        # Test region 1
        region_1 = executor.get_region(1)
        self.assertIsInstance(region_1, MemoryRegionReal)
        self.assertEqual(region_1.allocated_size, 200)
        self.assertTrue(region_1.unfilled)
        self.assertIsNone(region_1.reference)

        # Test region 2
        region_2 = executor.get_region(2)
        self.assertIsInstance(region_2, MemoryRegionReal)
        self.assertEqual(region_2.allocated_size, 4)
        self.assertIsNone(region_2.reference)
        self.assertEqual(region_2.region_data, struct.pack("<I", 10))

        # Test region 3
        region_3 = executor.get_region(3)
        self.assertIsInstance(region_3, MemoryRegionReal)
        self.assertEqual(region_3.allocated_size, 12)
        self.assertEqual(region_3.reference, 1)
        self.assertEqual(executor.referenceable_regions, [3])

        # Test region 4
        region_4 = executor.get_region(4)
        self.assertIsInstance(region_4, MemoryRegionReference)
        self.assertEqual(region_4.ref, 2)
        self.assertEqual(executor.references_to_fill, [4])

        # Test the pointer table
        table = executor.get_pointer_table(0)
        self.assertEqual(len(table), constants.MAX_MEM_REGIONS)
        self.assertEqual(table[0], header_and_table_size)
        self.assertEqual(table[1], header_and_table_size + 100)
        self.assertEqual(table[2], header_and_table_size + 300)
        self.assertEqual(table[3], header_and_table_size + 304)
        # table[4] is also 0 because region 4 is a reference
        for region in range(4, constants.MAX_MEM_REGIONS):
            self.assertEqual(table[region], 0)

        # Test the header
        header = executor.get_header()
        self.assertEqual(len(header), 2)
        self.assertEqual(header[0], constants.APPDATA_MAGIC_NUM)
        self.assertEqual(header[1], constants.DSE_VERSION)
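    # How the expected pointer table in test_simple_spec is derived: the
    # header is two words (magic number and DSE version) plus one table word
    # per region, so region data starts at (MAX_MEM_REGIONS + 2) * 4 bytes,
    # and each reserved region starts where the previous one ends (referenced
    # regions stay 0 until they are resolved). A small illustrative helper,
    # not part of the original tests; _expected_offsets([100, 200, 4, 12])
    # reproduces the table[0]..table[3] values checked above:
    @staticmethod
    def _expected_offsets(allocated_sizes):
        offset = (constants.MAX_MEM_REGIONS + 2) * 4
        offsets = []
        for size in allocated_sizes:
            # Each reserved region is placed immediately after the previous
            offsets.append(offset)
            offset += size
        return offsets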
    def test_write_synaptic_matrix_and_master_population_table(self):
        MockSimulator.setup()
        # Add an SDRAM so max SDRAM is high enough
        SDRAM(10000)

        default_config_paths = os.path.join(
            os.path.dirname(abstract_spinnaker_common.__file__),
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME)

        config = conf_loader.load_config(
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)
        config.set("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)

        machine_time_step = 1000.0

        pre_app_vertex = SimpleApplicationVertex(10)
        pre_vertex = SimpleMachineVertex(resources=None)
        pre_vertex_slice = Slice(0, 9)
        post_app_vertex = SimpleApplicationVertex(10)
        post_vertex = SimpleMachineVertex(resources=None)
        post_vertex_slice = Slice(0, 9)
        post_slice_index = 0
        one_to_one_connector_1 = OneToOneConnector(None)
        one_to_one_connector_1.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        one_to_one_connector_2 = OneToOneConnector(None)
        one_to_one_connector_2.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        all_to_all_connector = AllToAllConnector(None)
        all_to_all_connector.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        direct_synapse_information_1 = SynapseInformation(
            one_to_one_connector_1, SynapseDynamicsStatic(), 0, 1.5, 1.0)
        direct_synapse_information_2 = SynapseInformation(
            one_to_one_connector_2, SynapseDynamicsStatic(), 1, 2.5, 2.0)
        all_to_all_synapse_information = SynapseInformation(
            all_to_all_connector, SynapseDynamicsStatic(), 0, 4.5, 4.0)
        app_edge = ProjectionApplicationEdge(
            pre_app_vertex, post_app_vertex, direct_synapse_information_1)
        app_edge.add_synapse_information(direct_synapse_information_2)
        app_edge.add_synapse_information(all_to_all_synapse_information)
        machine_edge = ProjectionMachineEdge(
            app_edge.synapse_information, pre_vertex, post_vertex)
        partition_name = "TestPartition"

        graph = MachineGraph("Test")
        graph.add_vertex(pre_vertex)
        graph.add_vertex(post_vertex)
        graph.add_edge(machine_edge, partition_name)

        graph_mapper = GraphMapper()
        graph_mapper.add_vertex_mapping(
            pre_vertex, pre_vertex_slice, pre_app_vertex)
        graph_mapper.add_vertex_mapping(
            post_vertex, post_vertex_slice, post_app_vertex)
        graph_mapper.add_edge_mapping(machine_edge, app_edge)

        weight_scales = [4096.0, 4096.0]

        key = 0
        routing_info = RoutingInfo()
        routing_info.add_partition_info(PartitionRoutingInfo(
            [BaseKeyAndMask(key, 0xFFFFFFF0)],
            graph.get_outgoing_edge_partition_starting_at_vertex(
                pre_vertex, partition_name)))

        temp_spec = tempfile.mktemp()
        spec_writer = FileDataWriter(temp_spec)
        spec = DataSpecificationGenerator(spec_writer, None)
        master_pop_sz = 1000
        master_pop_region = 0
        all_syn_block_sz = 2000
        synapse_region = 1
        direct_region = 2
        spec.reserve_memory_region(master_pop_region, master_pop_sz)
        spec.reserve_memory_region(synapse_region, all_syn_block_sz)

        synaptic_manager = SynapticManager(
            n_synapse_types=2, ring_buffer_sigma=5.0,
            spikes_per_second=100.0, config=config)
        # Ugly, but the mock transceiver needs generate_on_machine to be False
        abstract_generate_connector_on_machine.IS_PYNN_8 = False
        synaptic_manager._write_synaptic_matrix_and_master_population_table(
            spec, [post_vertex_slice], post_slice_index, post_vertex,
            post_vertex_slice, all_syn_block_sz, weight_scales,
            master_pop_region, synapse_region, direct_region, routing_info,
            graph_mapper, graph, machine_time_step)
        spec.end_specification()
        spec_writer.close()

        spec_reader = FileDataReader(temp_spec)
        executor = DataSpecificationExecutor(
            spec_reader, master_pop_sz + all_syn_block_sz)
        executor.execute()

        master_pop_table = executor.get_region(0)
        synaptic_matrix = executor.get_region(1)
        direct_matrix = executor.get_region(2)

        all_data = bytearray()
        all_data.extend(master_pop_table.region_data[
            :master_pop_table.max_write_pointer])
        all_data.extend(synaptic_matrix.region_data[
            :synaptic_matrix.max_write_pointer])
        all_data.extend(direct_matrix.region_data[
            :direct_matrix.max_write_pointer])
        master_pop_table_address = 0
        synaptic_matrix_address = master_pop_table.max_write_pointer
        direct_synapses_address = (
            synaptic_matrix_address + synaptic_matrix.max_write_pointer)
        direct_synapses_address += 4
        indirect_synapses_address = synaptic_matrix_address
        placement = Placement(None, 0, 0, 1)
        transceiver = MockTransceiverRawData(all_data)

        # Get the master population table details
        items = synaptic_manager._poptable_type\
            .extract_synaptic_matrix_data_location(
                key, master_pop_table_address, transceiver,
                placement.x, placement.y)

        # The first entry should be direct, but the rest should be indirect;
        # the second is potentially direct, but is excluded by the limit on
        # the size of the direct matrix
        assert len(items) == 3
        assert items[0][2]
        assert not items[1][2]
        assert not items[2][2]

        data_1, row_len_1 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address, key=key,
            n_rows=pre_vertex_slice.n_atoms, index=0,
            using_extra_monitor_cores=False)
        connections_1 = synaptic_manager._synapse_io.read_synapses(
            direct_synapse_information_1, pre_vertex_slice, post_vertex_slice,
            row_len_1, 0, 2, weight_scales, data_1, None,
            app_edge.n_delay_stages, machine_time_step)

        # The first matrix is a 1-1 matrix, so row length is 1
        assert row_len_1 == 1

        # Check that all the connections have the right weight and delay
        assert len(connections_1) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 1.5 for conn in connections_1])
        assert all([conn["delay"] == 1.0 for conn in connections_1])

        data_2, row_len_2 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address, key=key,
            n_rows=pre_vertex_slice.n_atoms, index=1,
            using_extra_monitor_cores=False)
        connections_2 = synaptic_manager._synapse_io.read_synapses(
            direct_synapse_information_2, pre_vertex_slice, post_vertex_slice,
            row_len_2, 0, 2, weight_scales, data_2, None,
            app_edge.n_delay_stages, machine_time_step)

        # The second matrix is a 1-1 matrix, so row length is 1
        assert row_len_2 == 1

        # Check that all the connections have the right weight and delay
        assert len(connections_2) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 2.5 for conn in connections_2])
        assert all([conn["delay"] == 2.0 for conn in connections_2])

        data_3, row_len_3 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address, key=key,
            n_rows=pre_vertex_slice.n_atoms, index=2,
            using_extra_monitor_cores=False)
        connections_3 = synaptic_manager._synapse_io.read_synapses(
            all_to_all_synapse_information, pre_vertex_slice,
            post_vertex_slice, row_len_3, 0, 2, weight_scales, data_3, None,
            app_edge.n_delay_stages, machine_time_step)

        # The third matrix is an all-to-all matrix, so length is n_atoms
        assert row_len_3 == post_vertex_slice.n_atoms

        # Check that all the connections have the right weight and delay
        assert len(connections_3) == \
            post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms
        assert all([conn["weight"] == 4.5 for conn in connections_3])
        assert all([conn["delay"] == 4.0 for conn in connections_3])
Example #10
def test_write_data_spec():
    unittest_setup()
    # Ugly, but the mock transceiver needs generate_on_machine to be False
    AbstractGenerateConnectorOnMachine.generate_on_machine = say_false
    machine = virtual_machine(2, 2)

    p.setup(1.0)
    load_config()
    p.set_number_of_neurons_per_core(p.IF_curr_exp, 100)
    pre_pop = p.Population(10,
                           p.IF_curr_exp(),
                           label="Pre",
                           additional_parameters={
                               "splitter":
                               SplitterAbstractPopulationVertexSlice()
                           })
    post_pop = p.Population(10,
                            p.IF_curr_exp(),
                            label="Post",
                            additional_parameters={
                                "splitter":
                                SplitterAbstractPopulationVertexSlice()
                            })
    proj_one_to_one_1 = p.Projection(pre_pop, post_pop, p.OneToOneConnector(),
                                     p.StaticSynapse(weight=1.5, delay=1.0))
    proj_one_to_one_2 = p.Projection(pre_pop, post_pop, p.OneToOneConnector(),
                                     p.StaticSynapse(weight=2.5, delay=2.0))
    proj_all_to_all = p.Projection(
        pre_pop, post_pop, p.AllToAllConnector(allow_self_connections=False),
        p.StaticSynapse(weight=4.5, delay=4.0))

    from_list_list = [(i, i, i, (i * 5) + 1) for i in range(10)]
    proj_from_list = p.Projection(pre_pop, post_pop,
                                  p.FromListConnector(from_list_list),
                                  p.StaticSynapse())

    app_graph = globals_variables.get_simulator().original_application_graph
    context = {"ApplicationGraph": app_graph}
    with injection_context(context):
        delay_support_adder(app_graph)
        machine_graph, _ = spynnaker_splitter_partitioner(
            app_graph, machine, 100)
        allocator = ZonedRoutingInfoAllocator()
        n_keys_map = edge_to_n_keys_mapper(machine_graph)
        routing_info = allocator.__call__(machine_graph,
                                          n_keys_map,
                                          flexible=False)

    post_vertex = next(iter(post_pop._vertex.machine_vertices))
    post_vertex_slice = post_vertex.vertex_slice
    post_vertex_placement = Placement(post_vertex, 0, 0, 3)

    temp_spec = tempfile.mktemp()
    spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None)

    synaptic_matrices = SynapticMatrices(post_vertex_slice,
                                         n_synapse_types=2,
                                         all_single_syn_sz=10000,
                                         synaptic_matrix_region=1,
                                         direct_matrix_region=2,
                                         poptable_region=3,
                                         connection_builder_region=4)
    synaptic_matrices.write_synaptic_data(
        spec,
        post_pop._vertex.incoming_projections,
        all_syn_block_sz=10000,
        weight_scales=[32, 32],
        routing_info=routing_info)
    spec.end_specification()

    with io.FileIO(temp_spec, "rb") as spec_reader:
        executor = DataSpecificationExecutor(spec_reader, 20000)
        executor.execute()

    all_data = bytearray()
    all_data.extend(bytearray(executor.get_header()))
    all_data.extend(bytearray(executor.get_pointer_table(0)))
    for r in range(MAX_MEM_REGIONS):
        region = executor.get_region(r)
        if region is not None:
            all_data.extend(region.region_data)
    transceiver = MockTransceiverRawData(all_data)
    report_folder = mkdtemp()
    try:
        connections_1 = numpy.concatenate(
            synaptic_matrices.get_connections_from_machine(
                transceiver, post_vertex_placement,
                proj_one_to_one_1._projection_edge,
                proj_one_to_one_1._synapse_information))

        # Check that all the connections have the right weight and delay
        assert len(connections_1) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 1.5 for conn in connections_1])
        assert all([conn["delay"] == 1.0 for conn in connections_1])

        connections_2 = numpy.concatenate(
            synaptic_matrices.get_connections_from_machine(
                transceiver, post_vertex_placement,
                proj_one_to_one_2._projection_edge,
                proj_one_to_one_2._synapse_information))

        # Check that all the connections have the right weight and delay
        assert len(connections_2) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 2.5 for conn in connections_2])
        assert all([conn["delay"] == 2.0 for conn in connections_2])

        connections_3 = numpy.concatenate(
            synaptic_matrices.get_connections_from_machine(
                transceiver, post_vertex_placement,
                proj_all_to_all._projection_edge,
                proj_all_to_all._synapse_information))

        # Check that all the connections have the right weight and delay
        assert len(connections_3) == 100
        assert all([conn["weight"] == 4.5 for conn in connections_3])
        assert all([conn["delay"] == 4.0 for conn in connections_3])

        connections_4 = numpy.concatenate(
            synaptic_matrices.get_connections_from_machine(
                transceiver, post_vertex_placement,
                proj_from_list._projection_edge,
                proj_from_list._synapse_information))

        # Check that all the connections have the right weight and delay
        assert len(connections_4) == len(from_list_list)
        list_weights = [values[2] for values in from_list_list]
        list_delays = [values[3] for values in from_list_list]
        assert all(list_weights == connections_4["weight"])
        assert all(list_delays == connections_4["delay"])
    finally:
        shutil.rmtree(report_folder, ignore_errors=True)
Example #11
def test_pop_based_master_pop_table_standard(undelayed_indices_connected,
                                             delayed_indices_connected,
                                             n_pre_neurons, neurons_per_core,
                                             expect_app_keys, max_delay):
    unittest_setup()
    machine = virtual_machine(12, 12)

    # Build a from list connector with the delays we want
    connections = []
    connections.extend([(i * neurons_per_core + j, j, 0, 10)
                        for i in undelayed_indices_connected
                        for j in range(100)])
    connections.extend([(i * neurons_per_core + j, j, 0, max_delay)
                        for i in delayed_indices_connected
                        for j in range(100)])

    # Make simple source and target, where the source has 1000 atoms
    # split into 10 vertices (100 each) and the target has 100 atoms in
    # a single vertex
    p.setup(1.0)
    post_pop = p.Population(100,
                            p.IF_curr_exp(),
                            label="Post",
                            additional_parameters={
                                "splitter":
                                SplitterAbstractPopulationVertexSlice()
                            })
    p.IF_curr_exp.set_model_max_atoms_per_core(neurons_per_core)
    pre_pop = p.Population(n_pre_neurons,
                           p.IF_curr_exp(),
                           label="Pre",
                           additional_parameters={
                               "splitter":
                               SplitterAbstractPopulationVertexSlice()
                           })
    p.Projection(pre_pop, post_pop, p.FromListConnector(connections),
                 p.StaticSynapse())

    app_graph = globals_variables.get_simulator().original_application_graph
    context = {"ApplicationGraph": app_graph}
    with injection_context(context):
        delay_support_adder(app_graph)
        machine_graph, _ = spynnaker_splitter_partitioner(
            app_graph, machine, 100)
        allocator = ZonedRoutingInfoAllocator()
        n_keys_map = edge_to_n_keys_mapper(machine_graph)
        routing_info = allocator.__call__(machine_graph,
                                          n_keys_map,
                                          flexible=False)

    post_mac_vertex = next(iter(post_pop._vertex.machine_vertices))
    post_vertex_slice = post_mac_vertex.vertex_slice

    # Generate the data
    temp_spec = tempfile.mktemp()
    spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None)

    synaptic_matrices = SynapticMatrices(post_vertex_slice,
                                         n_synapse_types=2,
                                         all_single_syn_sz=10000,
                                         synaptic_matrix_region=1,
                                         direct_matrix_region=2,
                                         poptable_region=3,
                                         connection_builder_region=4)
    synaptic_matrices.write_synaptic_data(
        spec,
        post_pop._vertex.incoming_projections,
        all_syn_block_sz=1000000,
        weight_scales=[32, 32],
        routing_info=routing_info)

    with io.FileIO(temp_spec, "rb") as spec_reader:
        executor = DataSpecificationExecutor(spec_reader,
                                             SDRAM.max_sdram_found)
        executor.execute()

    # Read the population table and check entries
    region = executor.get_region(3)
    mpop_data = numpy.frombuffer(region.region_data,
                                 dtype="uint8").view("uint32")
    n_entries = mpop_data[0]
    n_addresses = mpop_data[1]

    # Compute how many entries and addresses there should be
    expected_n_entries = 0
    expected_n_addresses = 0
    if expect_app_keys:
        # Always one for undelayed, maybe one for delayed if present
        n_app_entries = 1 + int(bool(delayed_indices_connected))
        expected_n_entries += n_app_entries
        # 2 address list entries for each entry, as there is also extra_info
        expected_n_addresses += 2 * n_app_entries

    # If both delayed and undelayed, there is an entry for each incoming
    # machine edge
    elif delayed_indices_connected and undelayed_indices_connected:
        all_connected = set(undelayed_indices_connected)
        all_connected.update(delayed_indices_connected)
        expected_n_entries += len(all_connected)
        expected_n_addresses += len(all_connected)

    # If there are only undelayed indices, there is an entry for each
    elif undelayed_indices_connected:
        expected_n_entries += len(undelayed_indices_connected)
        expected_n_addresses += len(undelayed_indices_connected)

    # If there are only delayed indices, there are two entries for each because
    # the undelayed ones are still connected
    else:
        expected_n_entries += 2 * len(delayed_indices_connected)
        expected_n_addresses += 2 * len(delayed_indices_connected)

    assert n_entries == expected_n_entries
    assert n_addresses == expected_n_addresses
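    # Worked example of the expectation above (numbers chosen purely for
    # illustration): with expect_app_keys=True and both delayed and undelayed
    # indices present, n_app_entries is 2, giving expected_n_entries == 2 and
    # expected_n_addresses == 4 (two address-list entries per entry, since
    # each entry also has an extra_info entry).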
Example #12
    def test_write_synaptic_matrix_and_master_population_table(self):
        MockSimulator.setup()
        # Add an SDRAM so max SDRAM is high enough
        SDRAM(10000)

        # Ugly, but the mock transceiver needs generate_on_machine to be False
        AbstractGenerateConnectorOnMachine.generate_on_machine = self.say_false
        default_config_paths = os.path.join(
            os.path.dirname(abstract_spinnaker_common.__file__),
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME)

        config = conf_loader.load_config(
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)
        config.set("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)

        machine_time_step = 1000.0

        pre_app_vertex = SimpleApplicationVertex(10)
        pre_vertex_slice = Slice(0, 9)
        pre_vertex = pre_app_vertex.create_machine_vertex(
            pre_vertex_slice, None)
        post_app_vertex = SimpleApplicationVertex(10)
        post_vertex_slice = Slice(0, 9)
        post_vertex = post_app_vertex.create_machine_vertex(
            post_vertex_slice, None)
        post_slice_index = 0

        one_to_one_connector_1 = OneToOneConnector(None)
        direct_synapse_information_1 = SynapseInformation(
            one_to_one_connector_1, pre_app_vertex, post_app_vertex, False,
            False, None, SynapseDynamicsStatic(), 0, 1.5, 1.0)
        one_to_one_connector_1.set_projection_information(
            machine_time_step, direct_synapse_information_1)
        one_to_one_connector_2 = OneToOneConnector(None)
        direct_synapse_information_2 = SynapseInformation(
            one_to_one_connector_2, pre_app_vertex, post_app_vertex, False,
            False, None, SynapseDynamicsStatic(), 1, 2.5, 2.0)
        one_to_one_connector_2.set_projection_information(
            machine_time_step, direct_synapse_information_2)
        all_to_all_connector = AllToAllConnector(None)
        all_to_all_synapse_information = SynapseInformation(
            all_to_all_connector, pre_app_vertex, post_app_vertex, False,
            False, None, SynapseDynamicsStatic(), 0, 4.5, 4.0)
        all_to_all_connector.set_projection_information(
            machine_time_step, all_to_all_synapse_information)

        app_edge = ProjectionApplicationEdge(pre_app_vertex, post_app_vertex,
                                             direct_synapse_information_1)
        app_edge.add_synapse_information(direct_synapse_information_2)
        app_edge.add_synapse_information(all_to_all_synapse_information)
        machine_edge = app_edge.create_machine_edge(pre_vertex,
                                                    post_vertex,
                                                    label=None)
        partition_name = "TestPartition"

        graph = MachineGraph("Test")
        graph.add_vertex(pre_vertex)
        graph.add_vertex(post_vertex)
        graph.add_edge(machine_edge, partition_name)

        weight_scales = [4096.0, 4096.0]

        key = 0
        routing_info = RoutingInfo()
        routing_info.add_partition_info(
            PartitionRoutingInfo(
                [BaseKeyAndMask(key, 0xFFFFFFF0)],
                graph.get_outgoing_edge_partition_starting_at_vertex(
                    pre_vertex, partition_name)))

        temp_spec = tempfile.mktemp()
        spec_writer = FileDataWriter(temp_spec)
        spec = DataSpecificationGenerator(spec_writer, None)
        master_pop_sz = 1000
        all_syn_block_sz = 2000
        master_pop_region = 0
        synapse_region = 1
        direct_region = 2
        spec.reserve_memory_region(master_pop_region, master_pop_sz)
        spec.reserve_memory_region(synapse_region, all_syn_block_sz)

        synaptic_manager = SynapticManager(n_synapse_types=2,
                                           ring_buffer_sigma=5.0,
                                           spikes_per_second=100.0,
                                           config=config)
        # Poke in our testing region IDs
        synaptic_manager._pop_table_region = master_pop_region
        synaptic_manager._synaptic_matrix_region = synapse_region
        synaptic_manager._direct_matrix_region = direct_region

        synaptic_manager._write_synaptic_matrix_and_master_population_table(
            spec, [post_vertex_slice], post_slice_index, post_vertex,
            post_vertex_slice, all_syn_block_sz, weight_scales, routing_info,
            graph, machine_time_step)
        spec.end_specification()
        spec_writer.close()

        spec_reader = FileDataReader(temp_spec)
        executor = DataSpecificationExecutor(spec_reader,
                                             master_pop_sz + all_syn_block_sz)
        executor.execute()

        master_pop_table = executor.get_region(0)
        synaptic_matrix = executor.get_region(1)
        direct_matrix = executor.get_region(2)

        all_data = bytearray()
        all_data.extend(
            master_pop_table.region_data[:master_pop_table.max_write_pointer])
        all_data.extend(
            synaptic_matrix.region_data[:synaptic_matrix.max_write_pointer])
        all_data.extend(
            direct_matrix.region_data[:direct_matrix.max_write_pointer])
        master_pop_table_address = 0
        synaptic_matrix_address = master_pop_table.max_write_pointer
        direct_synapses_address = (synaptic_matrix_address +
                                   synaptic_matrix.max_write_pointer)
        direct_synapses_address += 4
        indirect_synapses_address = synaptic_matrix_address
        placement = Placement(None, 0, 0, 1)
        transceiver = MockTransceiverRawData(all_data)

        # Get the master population table details
        items = synaptic_manager._extract_synaptic_matrix_data_location(
            key, master_pop_table_address, transceiver, placement)

        # The first entry should be direct, but the rest should be indirect;
        # the second is potentially direct, but is excluded by the limit on
        # the size of the direct matrix
        assert len(items) == 3
        assert items[0][2]
        assert not items[1][2]
        assert not items[2][2]

        data_1, row_len_1 = synaptic_manager._retrieve_synaptic_block(
            txrx=transceiver,
            placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address,
            key=key,
            n_rows=pre_vertex_slice.n_atoms,
            index=0,
            using_monitors=False)
        connections_1 = synaptic_manager._read_synapses(
            direct_synapse_information_1, pre_vertex_slice, post_vertex_slice,
            row_len_1, 0, weight_scales, data_1, None, machine_time_step)

        # The first matrix is a 1-1 matrix, so row length is 1
        assert row_len_1 == 1

        # Check that all the connections have the right weight and delay
        assert len(connections_1) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 1.5 for conn in connections_1])
        assert all([conn["delay"] == 1.0 for conn in connections_1])

        data_2, row_len_2 = synaptic_manager._retrieve_synaptic_block(
            txrx=transceiver,
            placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address,
            key=key,
            n_rows=pre_vertex_slice.n_atoms,
            index=1,
            using_monitors=False)
        connections_2 = synaptic_manager._read_synapses(
            direct_synapse_information_2, pre_vertex_slice, post_vertex_slice,
            row_len_2, 0, weight_scales, data_2, None, machine_time_step)

        # The second matrix is a 1-1 matrix, so row length is 1
        assert row_len_2 == 1

        # Check that all the connections have the right weight and delay
        assert len(connections_2) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 2.5 for conn in connections_2])
        assert all([conn["delay"] == 2.0 for conn in connections_2])

        data_3, row_len_3 = synaptic_manager._retrieve_synaptic_block(
            txrx=transceiver,
            placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address,
            key=key,
            n_rows=pre_vertex_slice.n_atoms,
            index=2,
            using_monitors=False)
        connections_3 = synaptic_manager._read_synapses(
            all_to_all_synapse_information, pre_vertex_slice,
            post_vertex_slice, row_len_3, 0, weight_scales, data_3, None,
            machine_time_step)

        # The third matrix is an all-to-all matrix, so length is n_atoms
        assert row_len_3 == post_vertex_slice.n_atoms

        # Check that all the connections have the right weight and delay
        assert len(connections_3) == \
            post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms
        assert all([conn["weight"] == 4.5 for conn in connections_3])
        assert all([conn["delay"] == 4.0 for conn in connections_3])
Example #13
def test_write_data_spec():
    unittest_setup()
    # Ugly, but the mock transceiver needs generate_on_machine to be False
    AbstractGenerateConnectorOnMachine.generate_on_machine = say_false
    machine = virtual_machine(2, 2)

    p.setup(1.0)
    load_config()
    p.set_number_of_neurons_per_core(p.IF_curr_exp, 100)
    pre_pop = p.Population(
        10, p.IF_curr_exp(), label="Pre",
        additional_parameters={
            "splitter": SplitterAbstractPopulationVertexSlice()})
    post_pop = p.Population(
        10, p.IF_curr_exp(), label="Post",
        additional_parameters={
            "splitter": SplitterAbstractPopulationVertexSlice()})
    proj_one_to_one_1 = p.Projection(
        pre_pop, post_pop, p.OneToOneConnector(),
        p.StaticSynapse(weight=1.5, delay=1.0))
    proj_one_to_one_2 = p.Projection(
        pre_pop, post_pop, p.OneToOneConnector(),
        p.StaticSynapse(weight=2.5, delay=2.0))
    proj_all_to_all = p.Projection(
        pre_pop, post_pop, p.AllToAllConnector(allow_self_connections=False),
        p.StaticSynapse(weight=4.5, delay=4.0))

    # spynnaker8.setup(timestep=1)
    # # Add an sdram so max SDRAM is high enough
    # SDRAM(10000)
    #
    # set_config("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)
    #
    # placements = Placements()
    # pre_app_population = MockPopulation(10, "mock pop pre")
    # pre_app_vertex = SimpleTestVertex(10, label="pre")
    # pre_app_vertex.splitter = MockSplitter()
    # pre_app_vertex.splitter._called = True
    # pre_vertex_slice = Slice(0, 9)
    #
    # post_app_population = MockPopulation(10, "mock pop post")
    # pre_vertex = pre_app_vertex.create_machine_vertex(
    #     pre_vertex_slice, None)
    # placements.add_placement(Placement(pre_vertex, 0, 0, 1))
    # post_app_vertex = SimpleTestVertex(10, label="post")
    # post_app_vertex.splitter = MockSplitter()
    # post_app_vertex.splitter._called = True
    # post_vertex_slice = Slice(0, 9)
    # post_vertex = post_app_vertex.create_machine_vertex(
    #     post_vertex_slice, None)
    # post_vertex_placement = Placement(post_vertex, 0, 0, 2)
    # placements.add_placement(post_vertex_placement)
    # delay_app_vertex = DelayExtensionVertex(
    #     10, 16, 51, pre_app_vertex, label="delay")
    # delay_app_vertex.set_new_n_delay_stages_and_delay_per_stage(
    #     16, 51)
    # delay_app_vertex.splitter = SplitterDelayVertexSlice(
    #     pre_app_vertex.splitter)
    # delay_vertex = DelayExtensionMachineVertex(
    #     resources_required=None, label="", constraints=[],
    #     app_vertex=delay_app_vertex, vertex_slice=post_vertex_slice)
    # placements.add_placement(Placement(delay_vertex, 0, 0, 3))
    # one_to_one_connector_1 = OneToOneConnector(None)
    # direct_synapse_information_1 = SynapseInformation(
    #     one_to_one_connector_1, pre_app_population, post_app_population,
    #     False, False, None, SynapseDynamicsStatic(), 0, True, 1.5, 1.0)
    # one_to_one_connector_1.set_projection_information(
    #     direct_synapse_information_1)
    # one_to_one_connector_2 = OneToOneConnector(None)
    # direct_synapse_information_2 = SynapseInformation(
    #     one_to_one_connector_2, pre_app_population, post_app_population,
    #     False, False, None, SynapseDynamicsStatic(), 1, True, 2.5, 2.0)
    # one_to_one_connector_2.set_projection_information(
    #     direct_synapse_information_2)
    # all_to_all_connector = AllToAllConnector(False)
    # all_to_all_synapse_information = SynapseInformation(
    #     all_to_all_connector, pre_app_population, post_app_population,
    #     False, False, None, SynapseDynamicsStatic(), 0, True, 4.5, 4.0)
    # all_to_all_connector.set_projection_information(
    #     all_to_all_synapse_information)
    from_list_list = [(i, i, i, (i * 5) + 1) for i in range(10)]
    proj_from_list = p.Projection(
        pre_pop, post_pop, p.FromListConnector(from_list_list),
        p.StaticSynapse())

    app_graph = globals_variables.get_simulator().original_application_graph
    context = {
        "ApplicationGraph": app_graph
    }
    with injection_context(context):
        delay_adder = DelaySupportAdder()
        delay_adder.__call__(app_graph)
        partitioner = SpynnakerSplitterPartitioner()
        machine_graph, _ = partitioner.__call__(app_graph, machine, 100)
        allocator = ZonedRoutingInfoAllocator()
        n_keys_mapper = EdgeToNKeysMapper()
        n_keys_map = n_keys_mapper.__call__(machine_graph)
        routing_info = allocator.__call__(
            machine_graph, n_keys_map, flexible=False)

    post_vertex = next(iter(post_pop._vertex.machine_vertices))
    post_vertex_slice = post_vertex.vertex_slice
    post_vertex_placement = Placement(post_vertex, 0, 0, 3)

    temp_spec = tempfile.mktemp()
    spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None)

    synaptic_matrices = SynapticMatrices(
        post_vertex_slice, n_synapse_types=2, all_single_syn_sz=10000,
        synaptic_matrix_region=1, direct_matrix_region=2, poptable_region=3,
        connection_builder_region=4)
    synaptic_matrices.write_synaptic_data(
        spec, post_pop._vertex.incoming_projections, all_syn_block_sz=10000,
        weight_scales=[32, 32], routing_info=routing_info)
    spec.end_specification()

    with io.FileIO(temp_spec, "rb") as spec_reader:
        executor = DataSpecificationExecutor(spec_reader, 20000)
        executor.execute()

    all_data = bytearray()
    all_data.extend(bytearray(executor.get_header()))
    all_data.extend(bytearray(executor.get_pointer_table(0)))
    for r in range(MAX_MEM_REGIONS):
        region = executor.get_region(r)
        if region is not None:
            all_data.extend(region.region_data)
    transceiver = MockTransceiverRawData(all_data)
    report_folder = mkdtemp()
    try:
        connections_1 = numpy.concatenate(
            synaptic_matrices.get_connections_from_machine(
                transceiver, post_vertex_placement,
                proj_one_to_one_1._projection_edge,
                proj_one_to_one_1._synapse_information))

        # Check that all the connections have the right weight and delay
        assert len(connections_1) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 1.5 for conn in connections_1])
        assert all([conn["delay"] == 1.0 for conn in connections_1])

        connections_2 = numpy.concatenate(
            synaptic_matrices.get_connections_from_machine(
                transceiver, post_vertex_placement,
                proj_one_to_one_2._projection_edge,
                proj_one_to_one_2._synapse_information))

        # Check that all the connections have the right weight and delay
        assert len(connections_2) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 2.5 for conn in connections_2])
        assert all([conn["delay"] == 2.0 for conn in connections_2])

        connections_3 = numpy.concatenate(
            synaptic_matrices.get_connections_from_machine(
                transceiver, post_vertex_placement,
                proj_all_to_all._projection_edge,
                proj_all_to_all._synapse_information))

        # Check that all the connections have the right weight and delay
        assert len(connections_3) == 100
        assert all([conn["weight"] == 4.5 for conn in connections_3])
        assert all([conn["delay"] == 4.0 for conn in connections_3])

        connections_4 = numpy.concatenate(
            synaptic_matrices.get_connections_from_machine(
                transceiver, post_vertex_placement,
                proj_from_list._projection_edge,
                proj_from_list._synapse_information))

        # Check that all the connections have the right weight and delay
        assert len(connections_4) == len(from_list_list)
        list_weights = [values[2] for values in from_list_list]
        list_delays = [values[3] for values in from_list_list]
        assert all(list_weights == connections_4["weight"])
        assert all(list_delays == connections_4["delay"])
    finally:
        shutil.rmtree(report_folder, ignore_errors=True)