    def generate_data_spec(self, subvertex, placement, sub_graph, graph,
                           routing_info, hostname, graph_sub_graph_mapper,
                           report_folder, ip_tags, reverse_ip_tags,
                           write_text_specs, application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single Application Monitor on one core.
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        spec.comment("\n*** Spec for AppMonitor Instance ***\n\n")

        # Construct the data images needed for the Application Monitor:
        self.reserve_memory_regions(spec)
        self.write_setup_info(spec)
        self.write_configuration_region(spec, ip_tags)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()
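
All of the generate_data_spec variants in this listing share one
lifecycle: open the data and report writers, wrap them in a
DataSpecificationGenerator, reserve memory regions, write into them, then
end the specification and close the data writer. A minimal stand-in that
mirrors that shape without the SpiNNaker stack (the _SketchSpec class
below is hypothetical, not part of any library):

class _SketchSpec(object):
    """Records the calls a real DataSpecificationGenerator would get."""
    def __init__(self):
        self.ops = []

    def comment(self, text):
        self.ops.append(("comment", text))

    def reserve_memory_region(self, region, size, label=None, empty=False):
        self.ops.append(("reserve", region, size, label))

    def switch_write_focus(self, region):
        self.ops.append(("focus", region))

    def write_value(self, data):
        self.ops.append(("write", data))

    def end_specification(self):
        self.ops.append(("end",))

spec = _SketchSpec()
spec.reserve_memory_region(region=0, size=16, label="SYSTEM")
spec.switch_write_focus(region=0)
spec.write_value(data=0)
spec.end_specification()
assert spec.ops[-1] == ("end",)
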
    def generate_data_spec(
            self, subvertex, placement, sub_graph, graph, routing_info,
            hostname, graph_subgraph_mapper, report_folder, ip_tags,
            reverse_ip_tags, write_text_specs, application_run_time_folder):

        # Create new DataSpec for this processor:
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)
        spec = DataSpecificationGenerator(data_writer, report_writer)

        self._update_virtual_key(routing_info, sub_graph)
        self._fill_send_buffer()

        # Reserve regions
        self._reserve_regions(spec)

        # Write the system region
        self._write_basic_setup_info(spec, self._REGIONS.SYSTEM.value)

        # Write the additional recording information
        self.write_recording_data(
            spec, ip_tags, [self._record_buffer_size],
            self._buffer_size_before_receive)

        # Write the configuration information
        self._write_configuration(spec, routing_info, sub_graph, ip_tags)

        # End spec
        spec.end_specification()
        data_writer.close()

        return [data_writer.filename]

    def generate_data_spec(
            self, subvertex, placement, sub_graph, graph, routing_info,
            hostname, graph_subgraph_mapper, report_folder, ip_tags,
            reverse_ip_tags, write_text_specs, application_run_time_folder):

        # Create new DataSpec for this processor:
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)
        spec = DataSpecificationGenerator(data_writer, report_writer)

        self._update_virtual_key(routing_info, sub_graph)
        self._fill_send_buffer()

        # Reserve regions
        self._reserve_regions(spec)

        # Write the system region
        self._write_basic_setup_info(spec, self._REGIONS.SYSTEM.value)

        # Write the additional recording information
        self.write_recording_data(
            spec, ip_tags, [self._record_buffer_size],
            self._buffer_size_before_receive)

        # Write the configuration information
        self._write_configuration(spec, routing_info, sub_graph, ip_tags)

        # End spec
        spec.end_specification()
        data_writer.close()

        return data_writer.filename

    def generate_data_spec(
            self, subvertex, placement, partitioned_graph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._max_spikes_per_ts(vertex_slice),
            self._no_machine_time_steps)
        spike_history_sz = recording_utils.get_buffer_sizes(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        spike_buffering_needed = recording_utils.needs_buffering(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        buffer_size_before_receive = self._buffer_size_before_receive
        if not spike_buffering_needed:
            buffer_size_before_receive = spike_history_sz + 256

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Basic setup plus the recording data (flags and size) for 1 channel
        setup_sz = ((front_end_common_constants.
                     DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                    subvertex.get_recording_data_size(1))

        poisson_params_sz = self.get_params_bytes(vertex_slice)

        # Reserve SDRAM space for memory areas:
        self.reserve_memory_regions(
            spec, setup_sz, poisson_params_sz, spike_history_sz, subvertex)

        self._write_setup_info(
            spec, spike_history_sz, ip_tags, buffer_size_before_receive,
            subvertex)

        # Every outgoing edge partition should have the same key
        key = None
        partitions = partitioned_graph.\
            outgoing_edges_partitions_from_vertex(subvertex)
        for partition in partitions.values():
            keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)
            key = keys_and_masks[0].key

        self._write_poisson_parameters(spec, key, vertex_slice)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename

    def generate_data_spec(self, subvertex, placement, subgraph, graph,
                           routing_info, hostname, graph_mapper, report_folder,
                           ip_tags, reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSourcePoisson on one core.
        :param subvertex:
        :param placement:
        :param subgraph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        spike_hist_buff_sz = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Basic setup plus 8 bytes for recording flags and recording size
        setup_sz = ((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) + 8)

        poisson_params_sz = self.get_params_bytes(vertex_slice)

        # Reserve SDRAM space for memory areas:
        self.reserve_memory_regions(
            spec, setup_sz, poisson_params_sz, spike_hist_buff_sz)

        self.write_setup_info(spec, spike_hist_buff_sz)

        # Every subedge should have the same key
        key = None
        subedges = subgraph.outgoing_subedges_from_subvertex(subvertex)
        if len(subedges) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                subedges[0])
            key = keys_and_masks[0].key

        self.write_poisson_parameters(spec, key, vertex_slice.n_atoms)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

    def generate_data_spec(self, subvertex, placement, subgraph, graph,
                           routing_info, hostname, graph_mapper, report_folder,
                           ip_tags, reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        spike_hist_buff_sz = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        buffer_size_before_receive = self._buffer_size_before_receive
        if config.getboolean("Buffers", "enable_buffered_recording"):
            if spike_hist_buff_sz < self._spike_buffer_max_size:
                buffer_size_before_receive = spike_hist_buff_sz + 256
            else:
                spike_hist_buff_sz = self._spike_buffer_max_size
        else:
            buffer_size_before_receive = spike_hist_buff_sz + 256

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Basic setup plus the recording data (flags and size) for 1 channel
        setup_sz = ((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                    self.get_recording_data_size(1))

        poisson_params_sz = self.get_params_bytes(vertex_slice)

        # Reserve SDRAM space for memory areas:
        self.reserve_memory_regions(
            spec, setup_sz, poisson_params_sz, spike_hist_buff_sz)

        self._write_setup_info(
            spec, spike_hist_buff_sz, ip_tags, buffer_size_before_receive)

        # Every subedge should have the same key
        key = None
        subedges = subgraph.outgoing_subedges_from_subvertex(subvertex)
        if len(subedges) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                subedges[0])
            key = keys_and_masks[0].key

        self._write_poisson_parameters(spec, key, vertex_slice.n_atoms)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return [data_writer.filename]
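
The branching above (and the recording_utils calls in the Poisson example
earlier) encode one sizing rule: when buffered recording is enabled, cap
the history region at the configured maximum; otherwise request the host
read only once the whole region plus a small slack would fill. A
pure-Python sketch of that rule (buffer_sizes is illustrative; the
256-byte slack is taken from the code above):

def buffer_sizes(history_sz, buffer_max_size, default_before_receive,
                 buffered_recording_enabled):
    # returns (history region size, buffer size before host receive)
    if buffered_recording_enabled:
        if history_sz < buffer_max_size:
            return history_sz, history_sz + 256
        return buffer_max_size, default_before_receive
    return history_sz, history_sz + 256

assert buffer_sizes(1000, 4096, 512, True) == (1000, 1256)
assert buffer_sizes(9000, 4096, 512, True) == (4096, 512)
assert buffer_sizes(9000, 4096, 512, False) == (9000, 9512)
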
Example #7
def get_data_spec_and_file_writer_filename(processor_chip_x, processor_chip_y,
                                           processor_id, hostname,
                                           report_directory, write_text_specs,
                                           application_run_time_report_folder):
    """ Encapsulates the creation of the dsg writer and the file paths

    :param processor_chip_x: x-coordinate of the chip
    :type processor_chip_x: int
    :param processor_chip_y: y-coordinate of the chip
    :type processor_chip_y: int
    :param processor_id: The processor ID
    :type processor_id: int
    :param hostname: The hostname of the spinnaker machine
    :type hostname: str
    :param report_directory: the directory for the reports folder
    :type report_directory: str
    :param write_text_specs:\
        True if a textual version of the specification should be written
    :type write_text_specs: bool
    :param application_run_time_report_folder:\
        The folder to contain the resulting specification files
    :type application_run_time_report_folder: str
    :return: the filename of the data writer and the data specification object
    :rtype: str, DataSpecificationGenerator
    """

    binary_file_path = get_data_spec_file_path(
        processor_chip_x, processor_chip_y, processor_id, hostname,
        application_run_time_report_folder)
    data_writer = FileDataWriter(binary_file_path)

    # check if text reports are needed and if so initialise the report
    # writer to send down to dsg
    report_writer = None
    if write_text_specs:
        new_report_directory = os.path.join(report_directory,
                                            "data_spec_text_files")

        # uses locks to stop multiple instances of this writing the same
        # folder at the same time (os breaks down and throws exception
        # otherwise)
        _lock_condition.acquire()
        if not os.path.exists(new_report_directory):
            os.mkdir(new_report_directory)
        _lock_condition.release()

        file_name = "{}_dataSpec_{}_{}_{}.txt" \
            .format(hostname, processor_chip_x, processor_chip_y,
                    processor_id)
        report_file_path = os.path.join(new_report_directory, file_name)
        report_writer = FileDataWriter(report_file_path)

    # build the file writer for the spec
    spec = DataSpecificationGenerator(data_writer, report_writer)

    return data_writer.filename, spec
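
A hypothetical call, showing how the returned pair is typically used (the
coordinates, hostname and folders are illustrative only):

filename, spec = get_data_spec_and_file_writer_filename(
    0, 0, 1, "spinn-machine", "/tmp/reports", True, "/tmp/app_data")
spec.comment("example spec")
spec.end_specification()
print(filename)  # path of the binary spec file for core (0, 0, 1)
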
Example #8
    def generate_data_spec(
            self, subvertex, placement, partitioned_graph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        spike_history_sz = recording_utils.get_buffer_sizes(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        spike_buffering_needed = recording_utils.needs_buffering(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        buffer_size_before_receive = self._buffer_size_before_receive
        if not spike_buffering_needed:
            buffer_size_before_receive = spike_history_sz + 256

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Basic setup plus the recording data (flags and size) for 1 channel
        setup_sz = ((front_end_common_constants.
                     DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                    subvertex.get_recording_data_size(1))

        poisson_params_sz = self.get_params_bytes(vertex_slice)

        # Reserve SDRAM space for memory areas:
        self.reserve_memory_regions(
            spec, setup_sz, poisson_params_sz, spike_history_sz, subvertex)

        self._write_setup_info(
            spec, spike_history_sz, ip_tags, buffer_size_before_receive,
            subvertex)

        # Every outgoing edge partition should have the same key
        key = None
        partitions = partitioned_graph.\
            outgoing_edges_partitions_from_vertex(subvertex)
        for partition in partitions.values():
            keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)
            key = keys_and_masks[0].key

        self._write_poisson_parameters(spec, key, vertex_slice)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename

    def generate_data_spec(self, subvertex, placement, sub_graph, graph,
                           routing_info, hostname, graph_sub_graph_mapper,
                           report_folder, ip_tags, reverse_ip_tags,
                           write_text_specs, application_run_time_folder):
        """
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        spec.comment("\n*** Spec for LivePacketGather Instance ***\n\n")

        # Construct the data images needed for the Live Packet Gatherer:
        self.reserve_memory_regions(spec)
        self.write_setup_info(spec)
        self.write_configuration_region(spec, ip_tags)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()
        return [data_writer.filename]
Example #10
    def generate_data_spec(
            self, subvertex, placement, subgraph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSource Array on one core.
        :param subvertex:
        :param placement:
        :param subgraph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        spec.comment("\n*** Spec for SpikeSourceArray Instance ***\n\n")

        # ###################################################################
        # Reserve SDRAM space for memory areas:
        spec.comment("\nReserving memory space for spike data region:\n\n")
        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
        spike_buffer = self._get_spike_send_buffer(vertex_slice)
        recording_size = (spike_buffer.total_region_size + 4 +
                          _RECORD_OVERALLOCATION)

        self._reserve_memory_regions(spec, spike_buffer.buffer_size,
                                     recording_size)

        self._write_setup_info(
            spec, spike_buffer.buffer_size, ip_tags, recording_size)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        # tell the subvertex its region size
        subvertex.region_size = recording_size
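
The recording region above is sized as the send buffer's total region
size, one 32-bit word for the recorded-data size, and a fixed
over-allocation; a sketch of that arithmetic (_RECORD_OVERALLOCATION is a
module constant whose value is not shown in this listing):

def recording_region_size(total_region_size, record_overallocation):
    # one extra word holds the recorded-data size
    return total_region_size + 4 + record_overallocation
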
Example #11
    def generate_data_spec(self, subvertex, placement, subgraph, graph,
                           routing_info, hostname, graph_mapper, report_folder,
                           ip_tags, reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSourcePoisson on one core.
        :param subvertex:
        :param placement:
        :param subgraph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        spike_hist_buff_sz = self.get_spike_buffer_size(vertex_slice)

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Basic setup plus 8 bytes for recording flags and recording size
        setup_sz = ((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) + 8)

        poisson_params_sz = self.get_params_bytes(vertex_slice)

        # Reserve SDRAM space for memory areas:
        self.reserve_memory_regions(spec, setup_sz, poisson_params_sz,
                                    spike_hist_buff_sz)

        self.write_setup_info(spec, spike_hist_buff_sz)

        # Every subedge should have the same key
        keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
            subgraph.outgoing_subedges_from_subvertex(subvertex)[0])
        key = keys_and_masks[0].key

        self.write_poisson_parameters(spec, key, vertex_slice.n_atoms)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()
Example #12
    def generate_data_spec(self, subvertex, placement, subgraph, graph,
                           routing_info, hostname, graph_mapper, report_folder,
                           ip_tags, reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSource Array on one core.
        :param subvertex:
        :param placement:
        :param subgraph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        spec.comment("\n*** Spec for SpikeSourceArray Instance ***\n\n")

        # ###################################################################
        # Reserve SDRAM space for memory areas:
        spec.comment("\nReserving memory space for spike data region:\n\n")
        spike_buffer = self._get_spike_send_buffer(
            graph_mapper.get_subvertex_slice(subvertex))
        self._reserve_memory_regions(spec, spike_buffer.buffer_size)

        self._write_setup_info(spec, spike_buffer.buffer_size, ip_tags)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

    def generate_data_spec(self, placement, sub_graph, routing_info, hostname,
                           report_folder, ip_tags, reverse_ip_tags,
                           write_text_specs, application_run_time_folder):

        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        spec.comment("\n*** Spec for LivePacketGather Instance ***\n\n")

        # Construct the data images needed for the Live Packet Gatherer:
        self._reserve_memory_regions(spec)
        self._write_setup_info(spec)
        self._write_configuration_region(spec, ip_tags)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename

    def generate_data_spec(self, subvertex, placement, sub_graph, graph,
                           routing_info, hostname, graph_mapper,
                           report_folder, ip_tags, reverse_ip_tags,
                           write_text_specs, application_run_time_folder):
        # Create new DataSpec for this processor:
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        spec.comment("\n*** Spec for block of {} neurons ***\n"
                     .format(self.model_name))

        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory regions:
        spec.reserve_memory_region(
            region=self._REVERSE_IPTAG_MULTICAST_REGIONS.SYSTEM.value,
            size=constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4 + 8,
            label='SYSTEM')
        spec.reserve_memory_region(
            region=self._REVERSE_IPTAG_MULTICAST_REGIONS.CONFIGURATION.value,
            size=self._CONFIGURATION_REGION_SIZE, label='CONFIGURATION')
        if self._buffer_space is not None and self._buffer_space > 0:
            spec.reserve_memory_region(
                region=self._REVERSE_IPTAG_MULTICAST_REGIONS.BUFFER.value,
                size=self._buffer_space, label="BUFFER", empty=True)

        # set up system region writes
        self._write_basic_setup_info(
            spec, self._REVERSE_IPTAG_MULTICAST_REGIONS.SYSTEM.value)

        # TODO this can be removed once buffered out functionality is in place.
        # As then live injection can be recorded
        spec.write_value(data=0)
        spec.write_value(data=0)

        # set up configuration region writes
        spec.switch_write_focus(
            region=self._REVERSE_IPTAG_MULTICAST_REGIONS.CONFIGURATION.value)

        if self._virtual_key is None:
            subedge_routing_info = \
                routing_info.get_subedge_information_from_subedge(
                    sub_graph.outgoing_subedges_from_subvertex(subvertex)[0])
            key_and_mask = subedge_routing_info.keys_and_masks[0]
            self._mask = key_and_mask.mask
            self._virtual_key = key_and_mask.key

            if self._prefix is None:
                if self._prefix_type is None:
                    self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
                self._prefix = self._generate_prefix(self._virtual_key,
                                                     self._prefix_type)

        # add prefix boolean value
        if self._prefix is None:
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)

        # add prefix
        if self._prefix is None:
            spec.write_value(data=0)
        else:
            if self._prefix_type is EIEIOPrefix.LOWER_HALF_WORD:
                spec.write_value(data=self._prefix)
            else:
                spec.write_value(data=self._prefix << 16)

        # key left shift
        spec.write_value(data=self._key_left_shift)

        # add key check
        if self._check_key:
            spec.write_value(data=1)
        else:
            spec.write_value(data=0)

        # add key and mask
        spec.write_value(data=self._virtual_key)
        spec.write_value(data=self._mask)

        # Buffering control
        spec.write_value(data=self._buffer_space)
        spec.write_value(data=self._space_before_notification)

        # Notification
        if self._notify_buffer_space:
            ip_tag = next(iter(ip_tags))  # first tag; works on Python 2 and 3
            spec.write_value(data=ip_tag.tag)
        else:
            spec.write_value(data=0)

        # close spec
        spec.end_specification()
        data_writer.close()
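
The configuration writes above place a 16-bit EIEIO prefix in either half
of a 32-bit word. A pure-Python sketch of that placement, with the
semantics inferred from the branch on EIEIOPrefix.LOWER_HALF_WORD:

def place_prefix(prefix, lower_half_word=False):
    # upper-half-word prefixes are shifted into the top 16 bits
    if lower_half_word:
        return prefix & 0xFFFF
    return (prefix << 16) & 0xFFFFFFFF

assert place_prefix(0x1234) == 0x12340000
assert place_prefix(0x1234, lower_half_word=True) == 0x1234
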
    def generate_data_spec(self, subvertex, placement, subgraph, graph,
                           routing_info, hostname, graph_subgraph_mapper,
                           report_folder, ip_tags, reverse_ip_tags,
                           write_text_specs, application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single external retina device.
        """
        # Create new DataSpec for this processor:
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        # reserve regions
        self.reserve_memory_regions(spec)

        # Write the setup region
        spec.comment("\n*** Spec for robot motor control ***\n\n")
        self._write_basic_setup_info(spec, self.SYSTEM_REGION)

        # locate correct subedge for key
        edge_key = None
        if len(graph.outgoing_edges_from_vertex(self)) != 1:
            raise exceptions.SpynnakerException(
                "This motor should only have one outgoing edge to the robot")

        for subedge in subgraph.outgoing_subedges_from_subvertex(subvertex):
            edge_keys_and_masks = \
                routing_info.get_keys_and_masks_from_subedge(subedge)
            edge_key = edge_keys_and_masks[0].key

        # write params to memory
        spec.switch_write_focus(region=self.PARAMS_REGION)
        spec.write_value(data=edge_key)
        spec.write_value(data=self._speed)
        spec.write_value(data=self._sample_time)
        spec.write_value(data=self._update_time)
        spec.write_value(data=self._delay_time)
        spec.write_value(data=self._delta_threshold)
        if self._continue_if_not_different:
            spec.write_value(data=1)
        else:
            spec.write_value(data=0)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return [data_writer.filename]
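
The PARAMS_REGION written above holds seven 32-bit words in a fixed
order. A hypothetical host-side encoding of the same layout (SpiNNaker
cores are little-endian; pack_motor_params is illustrative, not a library
function):

import struct

def pack_motor_params(key, speed, sample_time, update_time, delay_time,
                      delta_threshold, continue_if_not_different):
    return struct.pack(
        "<7I", key, speed, sample_time, update_time, delay_time,
        delta_threshold, 1 if continue_if_not_different else 0)

assert len(pack_motor_params(0x100, 30, 100, 100, 10, 23, True)) == 28
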
Example #16
    def generate_data_spec(
            self, subvertex, placement, partitioned_graph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        """
        :param subvertex:
        :param placement:
        :param partitioned_graph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """

        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        # reserve region - add a word for the region size
        n_command_bytes = self._get_n_command_bytes()
        self._reserve_memory_regions(spec, n_command_bytes + 4, subvertex)

        # Write system region
        spec.comment("\n*** Spec for multicast source ***\n\n")
        self._write_basic_setup_info(
            spec, CommandSenderPartitionedVertex.SYSTEM_REGION)

        # Go through the times and replace negative times with positive ones
        new_times = set()
        for time in self._times_with_commands:
            if time < 0 and self._no_machine_time_steps is not None:
                real_time = self._no_machine_time_steps + (time + 1)
                if time in self._commands_with_payloads:
                    if real_time in self._commands_with_payloads:
                        self._commands_with_payloads[real_time].extend(
                            self._commands_with_payloads[time])
                    else:
                        self._commands_with_payloads[real_time] = \
                            self._commands_with_payloads[time]
                    del self._commands_with_payloads[time]
                if time in self._commands_without_payloads:
                    if real_time in self._commands_without_payloads:
                        self._commands_without_payloads[real_time].extend(
                            self._commands_without_payloads[time])
                    else:
                        self._commands_without_payloads[real_time] = \
                            self._commands_without_payloads[time]
                    del self._commands_without_payloads[time]
                new_times.add(real_time)

            # if runtime is infinite, then there's no point storing end of
            # simulation events, as they will never occur
            elif time < 0 and self._no_machine_time_steps is None:
                if time in self._commands_with_payloads:
                    del self._commands_with_payloads[time]
                if time in self._commands_without_payloads:
                    del self._commands_without_payloads[time]
            else:
                new_times.add(time)

        # write commands
        spec.switch_write_focus(region=CommandSenderPartitionedVertex.COMMANDS)
        spec.write_value(n_command_bytes)
        for time in sorted(new_times):

            # Gather the different types of commands
            with_payload = list()
            if time in self._commands_with_payloads:
                with_payload = self._commands_with_payloads[time]
            without_payload = list()
            if time in self._commands_without_payloads:
                without_payload = self._commands_without_payloads[time]

            spec.write_value(time)

            spec.write_value(len(with_payload))
            for command in with_payload:
                spec.write_value(self._get_key(
                    command, graph_mapper, routing_info, partitioned_graph))
                payload = command.get_payload(routing_info, partitioned_graph,
                                              graph_mapper)
                spec.write_value(payload)
                spec.write_value(command.repeat << 16 |
                                 command.delay_between_repeats)

            spec.write_value(len(without_payload))
            for command in without_payload:
                spec.write_value(self._get_key(
                    command, graph_mapper, routing_info, partitioned_graph))
                spec.write_value(command.repeat << 16 |
                                 command.delay_between_repeats)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename
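
Two conventions from the command writer above, restated as pure Python:
negative times are remapped relative to the end of the run (so -1 lands
on n_machine_time_steps itself), and each command's repeat count and
inter-repeat delay share one 32-bit word, repeats in the upper half:

def resolve_time(time, n_machine_time_steps):
    # negative times count back from the end of the run
    if time < 0 and n_machine_time_steps is not None:
        return n_machine_time_steps + (time + 1)
    return time

def pack_repeats(repeat, delay_between_repeats):
    # repeat count in the upper 16 bits, delay in the lower 16
    return ((repeat & 0xFFFF) << 16) | (delay_between_repeats & 0xFFFF)

assert resolve_time(-1, 1000) == 1000
assert resolve_time(5, 1000) == 5
assert pack_repeats(3, 100) == 0x00030064
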
Example #17
    def generate_data_spec(self, subvertex, placement, partitioned_graph,
                           graph, routing_info, hostname,
                           graph_subgraph_mapper, report_folder, ip_tags,
                           reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single external retina device.
        """
        # Create new DataSpec for this processor:
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        # reserve regions
        self.reserve_memory_regions(spec)

        # Write the setup region
        spec.comment("\n*** Spec for robot motor control ***\n\n")
        self._write_basic_setup_info(spec, self.SYSTEM_REGION)

        # locate correct subedge for key
        edge_key = None
        if len(graph.outgoing_edges_from_vertex(self)) != 1:
            raise exceptions.SpynnakerException(
                "This motor should only have one outgoing edge to the robot")

        partitions = partitioned_graph.\
            outgoing_edges_partitions_from_vertex(subvertex)
        for partition in partitions.values():
            edge_keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)
            edge_key = edge_keys_and_masks[0].key

        # write params to memory
        spec.switch_write_focus(region=self.PARAMS_REGION)
        spec.write_value(data=edge_key)
        spec.write_value(data=self._speed)
        spec.write_value(data=self._sample_time)
        spec.write_value(data=self._update_time)
        spec.write_value(data=self._delay_time)
        spec.write_value(data=self._delta_threshold)
        if self._continue_if_not_different:
            spec.write_value(data=1)
        else:
            spec.write_value(data=0)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename

    def test_call(self):
        executor = HostExecuteDataSpecification()
        transceiver = _MockTransceiver(user_0_addresses={0: 1000})
        machine = virtual_machine(2, 2)
        tempdir = tempfile.mkdtemp()

        dsg_targets = DataSpecificationTargets(machine, tempdir)
        with dsg_targets.create_data_spec(0, 0, 0) as spec_writer:
            spec = DataSpecificationGenerator(spec_writer)
            spec.reserve_memory_region(0, 100)
            spec.reserve_memory_region(1, 100, empty=True)
            spec.reserve_memory_region(2, 100)
            spec.switch_write_focus(0)
            spec.write_value(0)
            spec.write_value(1)
            spec.write_value(2)
            spec.switch_write_focus(2)
            spec.write_value(3)
            spec.end_specification()

        region_sizes = dict()
        region_sizes[0, 0, 0] = (
            APP_PTR_TABLE_BYTE_SIZE + sum(spec.region_sizes))

        # Execute the spec
        targets = ExecutableTargets()
        targets.add_processor("text.aplx", 0, 0, 0,
                              ExecutableType.USES_SIMULATION_INTERFACE)
        infos = executor.execute_application_data_specs(
            transceiver,
            machine,
            30,
            dsg_targets,
            False,
            targets,
            report_folder=tempdir,
            region_sizes=region_sizes)

        # Test regions - although 3 are created, only 2 should be uploaded
        # (0 and 2), and only the data written should be uploaded
        # The space between regions should be as allocated regardless of
        # how much data is written
        header_and_table_size = (MAX_MEM_REGIONS + 2) * BYTES_PER_WORD
        regions = transceiver.regions_written
        self.assertEqual(len(regions), 4)

        # Base address for header and table
        self.assertEqual(regions[1][0], 0)

        # Base address for region 0 (after header and table)
        self.assertEqual(regions[2][0], header_and_table_size)

        # Base address for region 2
        self.assertEqual(regions[3][0], header_and_table_size + 200)

        # User 0 write address
        self.assertEqual(regions[0][0], 1000)

        # Size of header and table
        self.assertEqual(len(regions[1][1]), header_and_table_size)

        # Size of region 0
        self.assertEqual(len(regions[2][1]), 12)

        # Size of region 2
        self.assertEqual(len(regions[3][1]), 4)

        # Size of user 0
        self.assertEqual(len(regions[0][1]), 4)

        info = infos[(0, 0, 0)]
        self.assertEqual(info.memory_used, 372)
        self.assertEqual(info.memory_written, 88)
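
The asserted numbers follow directly from the region layout, assuming the
usual data_specification constants MAX_MEM_REGIONS = 16 and
BYTES_PER_WORD = 4 (treat these values as assumptions and check them
against the installed version):

MAX_MEM_REGIONS, BYTES_PER_WORD = 16, 4
header_and_table = (MAX_MEM_REGIONS + 2) * BYTES_PER_WORD   # 72 bytes

# memory_used: header/table plus the three reserved 100-byte regions
memory_used = header_and_table + 3 * 100                    # 372

# memory_written: header/table plus three words written to region 0 and
# one word to region 2 (the empty region 1 is never uploaded)
memory_written = header_and_table + 3 * 4 + 1 * 4           # 88

assert (memory_used, memory_written) == (372, 88)
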
Example #19
    def generate_data_spec(self, subvertex, placement, subgraph, graph,
                           routing_info, hostname, graph_mapper, report_folder,
                           ip_tags, reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to
        build a group of IF_curr_exp neurons resident on a single core.
        :param subvertex:
        :param placement:
        :param subgraph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """
        # Create new DataSpec for this processor:
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self.model_name))

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        # Calculate the size of the tables to be reserved in SDRAM:
        vertex_in_edges = graph.incoming_edges_to_vertex(self)
        neuron_params_sz = self.get_neuron_params_size(vertex_slice)
        synapse_params_sz = self.get_synapse_parameter_size(vertex_slice)
        master_pop_table_sz = self.get_population_table_size(
            vertex_slice, vertex_in_edges)

        subvert_in_edges = subgraph.incoming_subedges_from_subvertex(subvertex)
        all_syn_block_sz = self.get_exact_synaptic_block_memory_size(
            graph_mapper, subvert_in_edges)

        spike_hist_buff_sz = self.get_spike_buffer_size(vertex_slice)
        potential_hist_buff_sz = self.get_v_buffer_size(vertex_slice)
        gsyn_hist_buff_sz = self.get_g_syn_buffer_size(vertex_slice)
        synapse_dynamics_region_sz = self.get_synapse_dynamics_parameter_size(
            vertex_in_edges)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        ring_buffer_shifts = self.get_ring_buffer_to_input_left_shifts(
            subvertex, subgraph, graph_mapper, self._spikes_per_second,
            self._machine_time_step, self._ring_buffer_sigma)

        weight_scales = [self.get_weight_scale(r) for r in ring_buffer_shifts]

        if logger.isEnabledFor(logging.DEBUG):
            for t, r, w in zip(self.get_synapse_targets(), ring_buffer_shifts,
                               weight_scales):
                logger.debug(
                    "Synapse type:%s - Ring buffer shift:%d, Max weight:%f" %
                    (t, r, w))

        # update projections for future use
        in_partitioned_edges = \
            subgraph.incoming_subedges_from_subvertex(subvertex)
        for partitioned_edge in in_partitioned_edges:
            partitioned_edge.weight_scales_setter(weight_scales)

        # Construct the data images needed for the Neuron:
        self._reserve_population_based_memory_regions(
            spec, neuron_params_sz, synapse_params_sz, master_pop_table_sz,
            all_syn_block_sz, spike_hist_buff_sz, potential_hist_buff_sz,
            gsyn_hist_buff_sz, synapse_dynamics_region_sz)

        # Write the setup info
        self._write_setup_info(spec, spike_hist_buff_sz,
                               potential_hist_buff_sz, gsyn_hist_buff_sz)

        # Every outgoing edge from this vertex should have the same key
        key = None
        if len(subgraph.outgoing_subedges_from_subvertex(subvertex)) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                subgraph.outgoing_subedges_from_subvertex(subvertex)[0])

            # NOTE: using the first key assigned as the key.  Should in future
            # get the list of keys and use one per neuron, to allow arbitrary
            # key and mask assignments
            key = keys_and_masks[0].key

        self._write_neuron_parameters(spec, key, subvertex, vertex_slice)

        self.write_synapse_parameters(spec, subvertex, vertex_slice)
        spec.write_array(ring_buffer_shifts)

        self.write_synaptic_matrix_and_master_population_table(
            spec, subvertex, all_syn_block_sz, weight_scales,
            constants.POPULATION_BASED_REGIONS.POPULATION_TABLE.value,
            constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value,
            routing_info, graph_mapper, subgraph)

        self.write_synapse_dynamics_parameters(
            spec, self._machine_time_step,
            constants.POPULATION_BASED_REGIONS.SYNAPSE_DYNAMICS.value,
            weight_scales)

        in_subedges = subgraph.incoming_subedges_from_subvertex(subvertex)
        for subedge in in_subedges:
            subedge.free_sublist()

        # End the writing of this specification:
        spec.end_specification()
        data_writer.close()
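
For reference, the weight scale derived from each ring buffer shift above
is conventionally 2 ** (16 - shift) in sPyNNaker, i.e. the factor that
maps weights into the 16-bit ring buffer range; treat the exact formula
as an assumption for this version:

def get_weight_scale(ring_buffer_to_input_left_shift):
    # assumed convention: weights occupy a 16-bit fixed-point ring buffer
    return float(2 ** (16 - ring_buffer_to_input_left_shift))

assert get_weight_scale(0) == 65536.0
assert get_weight_scale(16) == 1.0
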
    def generate_data_spec(
            self, subvertex, placement, subgraph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to
        build a group of IF_curr_exp neurons resident on a single core.
        :param subvertex:
        :param placement:
        :param subgraph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """
        # Create new DataSpec for this processor:
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        spec.comment("\n*** Spec for block of {} neurons ***\n"
                     .format(self.model_name))

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        # Calculate the size of the tables to be reserved in SDRAM:
        vertex_in_edges = graph.incoming_edges_to_vertex(self)
        neuron_params_sz = self.get_neuron_params_size(vertex_slice)
        synapse_params_sz = self.get_synapse_parameter_size(vertex_slice)
        master_pop_table_sz = self.get_population_table_size(vertex_slice,
                                                             vertex_in_edges)

        subvert_in_edges = subgraph.incoming_subedges_from_subvertex(subvertex)
        all_syn_block_sz = self.get_exact_synaptic_block_memory_size(
            graph_mapper, subvert_in_edges)

        spike_hist_buff_sz = self.get_spike_buffer_size(vertex_slice)
        potential_hist_buff_sz = self.get_v_buffer_size(vertex_slice)
        gsyn_hist_buff_sz = self.get_g_syn_buffer_size(vertex_slice)
        synapse_dynamics_region_sz = self.get_synapse_dynamics_parameter_size(
            vertex_in_edges)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        ring_buffer_shifts = self.get_ring_buffer_to_input_left_shifts(
            subvertex, subgraph, graph_mapper, self._spikes_per_second,
            self._machine_time_step, self._ring_buffer_sigma)

        weight_scales = [self.get_weight_scale(r) for r in ring_buffer_shifts]

        if logger.isEnabledFor(logging.DEBUG):
            for t, r, w in zip(self.get_synapse_targets(), ring_buffer_shifts,
                               weight_scales):
                logger.debug(
                    "Synapse type:%s - Ring buffer shift:%d, Max weight:%f"
                    % (t, r, w))

        # update projections for future use
        in_partitioned_edges = \
            subgraph.incoming_subedges_from_subvertex(subvertex)
        for partitioned_edge in in_partitioned_edges:
            partitioned_edge.weight_scales_setter(weight_scales)

        # Construct the data images needed for the Neuron:
        self._reserve_population_based_memory_regions(
            spec, neuron_params_sz, synapse_params_sz,
            master_pop_table_sz, all_syn_block_sz,
            spike_hist_buff_sz, potential_hist_buff_sz, gsyn_hist_buff_sz,
            synapse_dynamics_region_sz)

        self._write_setup_info(spec, spike_hist_buff_sz,
                               potential_hist_buff_sz, gsyn_hist_buff_sz,
                               self._executable_constant)

        # Every outgoing edge from this vertex should have the same key
        key = None
        if len(subgraph.outgoing_subedges_from_subvertex(subvertex)) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                subgraph.outgoing_subedges_from_subvertex(subvertex)[0])

            # NOTE: using the first key assigned as the key.  Should in future
            # get the list of keys and use one per neuron, to allow arbitrary
            # key and mask assignments
            key = keys_and_masks[0].key

        self._write_neuron_parameters(spec, key, subvertex, vertex_slice)

        self.write_synapse_parameters(spec, subvertex, vertex_slice)
        spec.write_array(ring_buffer_shifts)

        self.write_synaptic_matrix_and_master_population_table(
            spec, subvertex, all_syn_block_sz, weight_scales,
            constants.POPULATION_BASED_REGIONS.POPULATION_TABLE.value,
            constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value,
            routing_info, graph_mapper, subgraph)

        self.write_synapse_dynamics_parameters(
            spec, self._machine_time_step,
            constants.POPULATION_BASED_REGIONS.SYNAPSE_DYNAMICS.value,
            weight_scales)

        in_subedges = subgraph.incoming_subedges_from_subvertex(subvertex)
        for subedge in in_subedges:
            subedge.free_sublist()

        # End the writing of this specification:
        spec.end_specification()
        data_writer.close()

    def generate_data_spec(
            self, subvertex, placement, subgraph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags,
            reverse_ip_tags, write_text_specs, application_run_time_folder):

        # Create new DataSpec for this processor:
        data_writer, report_writer = self.get_data_spec_file_writers(
            placement.x, placement.y, placement.p, hostname, report_folder,
            write_text_specs, application_run_time_folder)
        spec = DataSpecificationGenerator(data_writer, report_writer)
        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self.model_name))
        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        # Get recording sizes - the order is important here as spikes will
        # require less space than voltage and voltage less than gsyn.  This
        # order ensures that the buffer size before receive is optimum for
        # all recording channels
        # TODO: Maybe split the buffer size before receive by channel?
        spike_history_sz, spike_buffering_needed = \
            self._get_recording_and_buffer_sizes(
                self._spike_buffer_max_size,
                self._spike_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps))
        v_history_sz, v_buffering_needed = \
            self._get_recording_and_buffer_sizes(
                self._v_buffer_max_size,
                self._v_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps))
        gsyn_history_sz, gsyn_buffering_needed = \
            self._get_recording_and_buffer_sizes(
                self._gsyn_buffer_max_size,
                self._gsyn_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps))
        buffer_size_before_receive = self._buffer_size_before_receive
        if (not spike_buffering_needed and not v_buffering_needed and
                not gsyn_buffering_needed):
            buffer_size_before_receive = max((
                spike_history_sz, v_history_sz, gsyn_history_sz)) + 256

        # Reserve memory regions
        self._reserve_memory_regions(
            spec, vertex_slice, spike_history_sz, v_history_sz,
            gsyn_history_sz)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key - use only the first edge
        key = None
        if len(subgraph.outgoing_subedges_from_subvertex(subvertex)) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                subgraph.outgoing_subedges_from_subvertex(subvertex)[0])

            # NOTE: using the first key assigned as the key.  Should in future
            # get the list of keys and use one per neuron, to allow arbitrary
            # key and mask assignments
            key = keys_and_masks[0].key

        # Write the regions
        self._write_setup_info(
            spec, spike_history_sz, v_history_sz, gsyn_history_sz, ip_tags,
            buffer_size_before_receive, self._time_between_requests)
        self._write_neuron_parameters(spec, key, vertex_slice)

        # allow the synaptic matrix to write its data specable data
        self._synapse_manager.write_data_spec(
            spec, self, vertex_slice, subvertex, placement, subgraph, graph,
            routing_info, hostname, graph_mapper)

        # End the writing of this specification:
        spec.end_specification()
        data_writer.close()

        return [data_writer.filename]
Example #22
    def generate_data_spec(self, subvertex, placement, partitioned_graph,
                           graph, routing_info, hostname, graph_mapper,
                           report_folder, ip_tags, reverse_ip_tags,
                           write_text_specs, application_run_time_folder):
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        # Reserve memory:
        spec.comment("\nReserving memory space for data regions:\n\n")

        # ###################################################################
        # Reserve SDRAM space for memory areas:
        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
        n_words_per_stage = int(math.ceil(vertex_slice.n_atoms / 32.0))
        delay_params_sz = 4 * (_DELAY_PARAM_HEADER_WORDS +
                               (self._n_delay_stages * n_words_per_stage))

        spec.reserve_memory_region(
            region=(DelayExtensionPartitionedVertex._DELAY_EXTENSION_REGIONS.
                    SYSTEM.value),
            size=common_constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4,
            label='setup')

        spec.reserve_memory_region(
            region=(DelayExtensionPartitionedVertex._DELAY_EXTENSION_REGIONS.
                    DELAY_PARAMS.value),
            size=delay_params_sz,
            label='delay_params')

        subvertex.reserve_provenance_data_region(spec)

        self.write_setup_info(spec)

        spec.comment("\n*** Spec for Delay Extension Instance ***\n\n")

        key = None
        partitions = partitioned_graph.\
            outgoing_edges_partitions_from_vertex(subvertex)
        for partition in partitions.values():
            keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)

            # NOTE: using the first key assigned as the key.  Should in future
            # get the list of keys and use one per neuron, to allow arbitrary
            # key and mask assignments
            key = keys_and_masks[0].key

        incoming_key = None
        incoming_mask = None
        incoming_edges = partitioned_graph.incoming_subedges_from_subvertex(
            subvertex)

        for incoming_edge in incoming_edges:
            incoming_slice = graph_mapper.get_subvertex_slice(
                incoming_edge.pre_subvertex)
            if (incoming_slice.lo_atom == vertex_slice.lo_atom
                    and incoming_slice.hi_atom == vertex_slice.hi_atom):
                partition = partitioned_graph.get_partition_of_subedge(
                    incoming_edge)
                keys_and_masks = \
                    routing_info.get_keys_and_masks_from_partition(partition)
                incoming_key = keys_and_masks[0].key
                incoming_mask = keys_and_masks[0].mask

        self.write_delay_parameters(spec, vertex_slice, key, incoming_key,
                                    incoming_mask)
        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename
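
The delay parameter region above is sized as a fixed header plus one bit
per atom per delay stage, rounded up to whole 32-bit words. A sketch of
that arithmetic (the 2-word header below is hypothetical; the real
_DELAY_PARAM_HEADER_WORDS value is not shown in this listing):

import math

def delay_params_bytes(n_atoms, n_delay_stages, header_words):
    n_words_per_stage = int(math.ceil(n_atoms / 32.0))
    return 4 * (header_words + n_delay_stages * n_words_per_stage)

# 100 atoms need 4 words per stage; with 3 stages and a 2-word header:
# 4 * (2 + 3 * 4) = 56 bytes
assert delay_params_bytes(100, 3, 2) == 56
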
    def test_write_synaptic_matrix_and_master_population_table(self):
        MockSimulator.setup()

        default_config_paths = os.path.join(
            os.path.dirname(abstract_spinnaker_common.__file__),
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME)

        config = conf_loader.load_config(
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)
        config.set("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)

        machine_time_step = 1000.0

        pre_app_vertex = SimpleApplicationVertex(10)
        pre_vertex = SimpleMachineVertex(resources=None)
        pre_vertex_slice = Slice(0, 9)
        post_app_vertex = SimpleApplicationVertex(10)
        post_vertex = SimpleMachineVertex(resources=None)
        post_vertex_slice = Slice(0, 9)
        post_slice_index = 0
        one_to_one_connector_1 = OneToOneConnector(None)
        one_to_one_connector_1.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        one_to_one_connector_1.set_weights_and_delays(1.5, 1.0)
        one_to_one_connector_2 = OneToOneConnector(None)
        one_to_one_connector_2.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        one_to_one_connector_2.set_weights_and_delays(2.5, 2.0)
        all_to_all_connector = AllToAllConnector(None)
        all_to_all_connector.set_projection_information(
            pre_app_vertex, post_app_vertex, None, machine_time_step)
        all_to_all_connector.set_weights_and_delays(4.5, 4.0)
        direct_synapse_information_1 = SynapseInformation(
            one_to_one_connector_1, SynapseDynamicsStatic(), 0)
        direct_synapse_information_2 = SynapseInformation(
            one_to_one_connector_2, SynapseDynamicsStatic(), 1)
        all_to_all_synapse_information = SynapseInformation(
            all_to_all_connector, SynapseDynamicsStatic(), 0)
        app_edge = ProjectionApplicationEdge(
            pre_app_vertex, post_app_vertex, direct_synapse_information_1)
        app_edge.add_synapse_information(direct_synapse_information_2)
        app_edge.add_synapse_information(all_to_all_synapse_information)
        machine_edge = ProjectionMachineEdge(
            app_edge.synapse_information, pre_vertex, post_vertex)
        partition_name = "TestPartition"

        graph = MachineGraph("Test")
        graph.add_vertex(pre_vertex)
        graph.add_vertex(post_vertex)
        graph.add_edge(machine_edge, partition_name)

        graph_mapper = GraphMapper()
        graph_mapper.add_vertex_mapping(
            pre_vertex, pre_vertex_slice, pre_app_vertex)
        graph_mapper.add_vertex_mapping(
            post_vertex, post_vertex_slice, post_app_vertex)
        graph_mapper.add_edge_mapping(machine_edge, app_edge)

        weight_scales = [4096.0, 4096.0]

        key = 0
        routing_info = RoutingInfo()
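        # The mask 0xFFFFFFF0 fixes the top 28 bits, leaving the low 4 bits
        # to index atoms: 16 atoms per key, enough for the 10-atom slices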
        routing_info.add_partition_info(PartitionRoutingInfo(
            [BaseKeyAndMask(key, 0xFFFFFFF0)],
            graph.get_outgoing_edge_partition_starting_at_vertex(
                pre_vertex, partition_name)))

        temp_spec = tempfile.mktemp()
        spec_writer = FileDataWriter(temp_spec)
        spec = DataSpecificationGenerator(spec_writer, None)
        master_pop_sz = 1000
        master_pop_region = 0
        all_syn_block_sz = 2000
        synapse_region = 1
        spec.reserve_memory_region(master_pop_region, master_pop_sz)
        spec.reserve_memory_region(synapse_region, all_syn_block_sz)

        synapse_type = MockSynapseType()

        synaptic_manager = SynapticManager(
            synapse_type=synapse_type, ring_buffer_sigma=5.0,
            spikes_per_second=100.0, config=config)
        synaptic_manager._write_synaptic_matrix_and_master_population_table(
            spec, [post_vertex_slice], post_slice_index, post_vertex,
            post_vertex_slice, all_syn_block_sz, weight_scales,
            master_pop_region, synapse_region, routing_info, graph_mapper,
            graph, machine_time_step)
        spec.end_specification()
        spec_writer.close()

        spec_reader = FileDataReader(temp_spec)
        executor = DataSpecificationExecutor(
            spec_reader, master_pop_sz + all_syn_block_sz)
        executor.execute()

        master_pop_table = executor.get_region(0)
        synaptic_matrix = executor.get_region(1)

        all_data = bytearray()
        all_data.extend(master_pop_table.region_data[
            :master_pop_table.max_write_pointer])
        all_data.extend(synaptic_matrix.region_data[
            :synaptic_matrix.max_write_pointer])
        master_pop_table_address = 0
        synaptic_matrix_address = master_pop_table.max_write_pointer
        direct_synapses_address = struct.unpack_from(
            "<I", synaptic_matrix.region_data)[0]
        direct_synapses_address += synaptic_matrix_address + 8
        indirect_synapses_address = synaptic_matrix_address + 4
        placement = Placement(None, 0, 0, 1)
        transceiver = MockTransceiverRawData(all_data)

        # Get the master population table details
        items = synaptic_manager._poptable_type\
            .extract_synaptic_matrix_data_location(
                key, master_pop_table_address, transceiver,
                placement.x, placement.y)

        # With direct matrices enabled, the first entry would be direct and
        # the rest indirect (the second could be direct, but is ruled out by
        # the limit on the direct-matrix size)
        assert len(items) == 3

        # TODO: changed because direct matrices are currently disabled, so
        # all three entries are indirect
        assert not items[0][2]
        assert not items[1][2]
        assert not items[2][2]

        data_1, row_len_1 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address, key=key,
            n_rows=pre_vertex_slice.n_atoms, index=0,
            using_extra_monitor_cores=False)
        connections_1 = synaptic_manager._synapse_io.read_synapses(
            direct_synapse_information_1, pre_vertex_slice, post_vertex_slice,
            row_len_1, 0, 2, weight_scales, data_1, None,
            app_edge.n_delay_stages, machine_time_step)

        # The first matrix is a one-to-one matrix, so the row length is 1
        assert row_len_1 == 1

        # Check that all the connections have the right weight and delay
        assert len(connections_1) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 1.5 for conn in connections_1])
        assert all([conn["delay"] == 1.0 for conn in connections_1])

        data_2, row_len_2 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address, key=key,
            n_rows=pre_vertex_slice.n_atoms, index=1,
            using_extra_monitor_cores=False)
        connections_2 = synaptic_manager._synapse_io.read_synapses(
            direct_synapse_information_2, pre_vertex_slice, post_vertex_slice,
            row_len_2, 0, 2, weight_scales, data_2, None,
            app_edge.n_delay_stages, machine_time_step)

        # The second matrix is a one-to-one matrix, so the row length is 1
        assert row_len_2 == 1

        # Check that all the connections have the right weight and delay
        assert len(connections_2) == post_vertex_slice.n_atoms
        assert all([conn["weight"] == 2.5 for conn in connections_2])
        assert all([conn["delay"] == 2.0 for conn in connections_2])

        data_3, row_len_3 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=master_pop_table_address,
            indirect_synapses_address=indirect_synapses_address,
            direct_synapses_address=direct_synapses_address, key=key,
            n_rows=pre_vertex_slice.n_atoms, index=2,
            using_extra_monitor_cores=False)
        connections_3 = synaptic_manager._synapse_io.read_synapses(
            all_to_all_synapse_information, pre_vertex_slice,
            post_vertex_slice, row_len_3, 0, 2, weight_scales, data_3, None,
            app_edge.n_delay_stages, machine_time_step)

        # The third matrix is an all-to-all matrix, so the row length is
        # n_atoms
        assert row_len_3 == post_vertex_slice.n_atoms

        # Check that all the connections have the right weight and delay
        assert len(connections_3) == \
            post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms
        assert all([conn["weight"] == 4.5 for conn in connections_3])
        assert all([conn["delay"] == 4.0 for conn in connections_3])
    def test_call(self):
        executor = HostExecuteDataSpecification()
        transceiver = _MockTransceiver(user_0_addresses={0: 1000})
        machine = VirtualMachine(2, 2)
        tempdir = tempfile.mkdtemp()

        dsg_targets = DataSpecificationTargets(machine, tempdir)
        with dsg_targets.create_data_spec(0, 0, 0) as spec_writer:
            spec = DataSpecificationGenerator(spec_writer)
            spec.reserve_memory_region(0, 100)
            spec.reserve_memory_region(1, 100, empty=True)
            spec.reserve_memory_region(2, 100)
            spec.switch_write_focus(0)
            spec.write_value(0)
            spec.write_value(1)
            spec.write_value(2)
            spec.switch_write_focus(2)
            spec.write_value(3)
            spec.end_specification()

        infos = executor.__call__(
            transceiver, machine, 30, dsg_targets, tempdir)

        # Test regions - although 3 are created, only 2 should be uploaded
        # (0 and 2), and only the data written should be uploaded
        # The space between regions should be as allocated regardless of
        # how much data is written
        header_and_table_size = (MAX_MEM_REGIONS + 2) * 4
        regions = transceiver.regions_written
        self.assertEqual(len(regions), 4)

        # Base address for header and table
        self.assertEqual(regions[0][0], 0)

        # Base address for region 0 (after header and table)
        self.assertEqual(regions[1][0], header_and_table_size)

        # Base address for region 2
        self.assertEqual(regions[2][0], header_and_table_size + 200)

        # User 0 write address
        self.assertEqual(regions[3][0], 1000)

        # Size of header and table
        self.assertEqual(len(regions[0][1]), header_and_table_size)

        # Size of region 0
        self.assertEqual(len(regions[1][1]), 12)

        # Size of region 2
        self.assertEqual(len(regions[2][1]), 4)

        # Size of user 0
        self.assertEqual(len(regions[3][1]), 4)

        info = infos[(0, 0, 0)]
        self.assertEqual(info["memory_used"], 372)
        self.assertEqual(info["memory_written"], 88)
    def generate_data_spec(
            self, subvertex, placement, sub_graph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single Delay Extension Block on one core.
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        # Reserve memory:
        spec.comment("\nReserving memory space for data regions:\n\n")

        # ###################################################################
        # Reserve SDRAM space for memory areas:

        delay_params_header_words = 3

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        n_atoms = vertex_slice.hi_atom - vertex_slice.lo_atom + 1
        block_len_words = int(math.ceil(n_atoms / 32.0))
        num_delay_blocks, delay_blocks = self.get_delay_blocks(
            subvertex, sub_graph, graph_mapper)
        delay_params_sz = 4 * (delay_params_header_words +
                               (num_delay_blocks * block_len_words))

        spec.reserve_memory_region(
            region=self._DELAY_EXTENSION_REGIONS.SYSTEM.value,
            size=constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4,
            label='setup')

        spec.reserve_memory_region(
            region=self._DELAY_EXTENSION_REGIONS.DELAY_PARAMS.value,
            size=delay_params_sz, label='delay_params')

        self.write_setup_info(spec, 0)

        spec.comment("\n*** Spec for Delay Extension Instance ***\n\n")

        key = None
        if len(sub_graph.outgoing_subedges_from_subvertex(subvertex)) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                sub_graph.outgoing_subedges_from_subvertex(subvertex)[0])

            # NOTE: the first key assigned is used as the key.  In future,
            # the full list of keys should be fetched and one used per
            # neuron, to allow arbitrary key and mask assignments
            key = keys_and_masks[0].key

        self.write_delay_parameters(spec, placement.x, placement.y,
                                    placement.p, subvertex, num_delay_blocks,
                                    delay_blocks, vertex_slice, key)
        # End-of-Spec:
        spec.end_specification()
        data_writer.close()
    def generate_data_spec(
            self, subvertex, placement, sub_graph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single external retina device.
        :param subvertex:
        :param placement:
        :param sub_graph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """

        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        # reserve region - add a word for the region size
        n_command_bytes = self._get_n_command_bytes()
        self._reserve_memory_regions(spec, n_command_bytes + 4)

        # Write system region
        spec.comment("\n*** Spec for multi cast source ***\n\n")
        self._write_basic_setup_info(spec, self.SYSTEM_REGION)

        # Go through the times and replace negative times with positive ones
        new_times = set()
        for time in self._times_with_commands:
            if time < 0 and self._no_machine_time_steps is not None:
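                # time == -1 maps to no_machine_time_steps, the end of
                # the run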
                real_time = self._no_machine_time_steps + (time + 1)
                if time in self._commands_with_payloads:
                    if real_time in self._commands_with_payloads:
                        self._commands_with_payloads[real_time].extend(
                            self._commands_with_payloads[time])
                    else:
                        self._commands_with_payloads[real_time] = \
                            self._commands_with_payloads[time]
                    del self._commands_with_payloads[time]
                if time in self._commands_without_payloads:
                    if real_time in self._commands_without_payloads:
                        self._commands_without_payloads[real_time].extend(
                            self._commands_without_payloads[time])
                    else:
                        self._commands_without_payloads[real_time] = \
                            self._commands_without_payloads[time]
                    del self._commands_without_payloads[time]
                new_times.add(real_time)
            # if the runtime is infinite, there's no point storing
            # end-of-simulation events, as they will never occur
            elif time < 0 and self._no_machine_time_steps is None:
                if time in self._commands_with_payloads:
                    del self._commands_with_payloads[time]
                if time in self._commands_without_payloads:
                    del self._commands_without_payloads[time]
            else:
                new_times.add(time)

        # write commands
        spec.switch_write_focus(region=self.COMMANDS)
        spec.write_value(n_command_bytes)
        for time in sorted(new_times):

            # Gather the different types of commands
            with_payload = list()
            if time in self._commands_with_payloads:
                with_payload = self._commands_with_payloads[time]
            without_payload = list()
            if time in self._commands_without_payloads:
                without_payload = self._commands_without_payloads[time]

            spec.write_value(time)

            spec.write_value(len(with_payload))
            for command in with_payload:
                spec.write_value(self._get_key(command, graph_mapper,
                                               routing_info))
                payload = command.get_payload(routing_info, sub_graph,
                                              graph_mapper)
                spec.write_value(payload)
                spec.write_value(command.repeat << 16 |
                                 command.delay_between_repeats)

            spec.write_value(len(without_payload))
            for command in without_payload:
                spec.write_value(self._get_key(command, graph_mapper,
                                               routing_info))
                spec.write_value(command.repeat << 16 |
                                 command.delay_between_repeats)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()
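Each scheduled command above is written as its routing key, an optional
payload, and one control word packing the repeat count into the top 16 bits
and the delay between repeats into the bottom 16 bits. A standalone sketch of
that packing and its inverse:

def pack_repeats(repeat, delay_between_repeats):
    # Both fields must fit in 16 bits to share one 32-bit word
    assert 0 <= repeat < (1 << 16)
    assert 0 <= delay_between_repeats < (1 << 16)
    return (repeat << 16) | delay_between_repeats

def unpack_repeats(word):
    return word >> 16, word & 0xFFFF

assert unpack_repeats(pack_repeats(3, 250)) == (3, 250)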
Example #27
    def test_call(self):
        executor = HostExecuteDataSpecification()
        transceiver = _MockTransceiver(user_0_addresses={0: 1000})
        machine = VirtualMachine(2, 2)

        # Write a data spec to execute
        temp_spec = mktemp()
        spec_writer = FileDataWriter(temp_spec)
        spec = DataSpecificationGenerator(spec_writer)
        spec.reserve_memory_region(0, 100)
        spec.reserve_memory_region(1, 100, empty=True)
        spec.reserve_memory_region(2, 100)
        spec.switch_write_focus(0)
        spec.write_value(0)
        spec.write_value(1)
        spec.write_value(2)
        spec.switch_write_focus(2)
        spec.write_value(3)
        spec.end_specification()

        # Execute the spec
        dsg_targets = {(0, 0, 0): temp_spec}
        executor.__call__(transceiver, machine, 30, dsg_targets)

        # Test regions - although 3 are created, only 2 should be uploaded
        # (0 and 2), and only the data written should be uploaded
        # The space between regions should be as allocated regardless of
        # how much data is written
        header_and_table_size = (constants.MAX_MEM_REGIONS + 2) * 4
        regions = transceiver.regions_written
        self.assertEqual(len(regions), 4)

        # Base address for header and table
        self.assertEqual(regions[0][0], 0)

        # Base address for region 0 (after header and table)
        self.assertEqual(regions[1][0], header_and_table_size)

        # Base address for region 2
        self.assertEqual(regions[2][0], header_and_table_size + 200)

        # User 0 write address
        self.assertEqual(regions[3][0], 1000)

        # Size of header and table
        self.assertEqual(len(regions[0][1]), header_and_table_size)

        # Size of region 0
        self.assertEqual(len(regions[1][1]), 12)

        # Size of region 2
        self.assertEqual(len(regions[2][1]), 4)

        # Size of user 0
        self.assertEqual(len(regions[3][1]), 4)
Example #28
    def generate_data_spec(self, subvertex, placement, sub_graph, graph,
                           routing_info, hostname, graph_mapper, report_folder,
                           ip_tags, reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single Delay Extension Block on one core.
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        # Reserve memory:
        spec.comment("\nReserving memory space for data regions:\n\n")

        # ###################################################################
        # Reserve SDRAM space for memory areas:

        delay_params_header_words = 3

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        n_atoms = vertex_slice.hi_atom - vertex_slice.lo_atom + 1
        block_len_words = int(math.ceil(n_atoms / 32.0))
        num_delay_blocks, delay_blocks = self.get_delay_blocks(
            subvertex, sub_graph, graph_mapper)
        delay_params_sz = 4 * (delay_params_header_words +
                               (num_delay_blocks * block_len_words))

        spec.reserve_memory_region(
            region=self._DELAY_EXTENSION_REGIONS.SYSTEM.value,
            size=constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4,
            label='setup')

        spec.reserve_memory_region(
            region=self._DELAY_EXTENSION_REGIONS.DELAY_PARAMS.value,
            size=delay_params_sz,
            label='delay_params')

        self.write_setup_info(spec, 0)

        spec.comment("\n*** Spec for Delay Extension Instance ***\n\n")

        key = None
        if len(sub_graph.outgoing_subedges_from_subvertex(subvertex)) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                sub_graph.outgoing_subedges_from_subvertex(subvertex)[0])

            # NOTE: the first key assigned is used as the key.  In future,
            # the full list of keys should be fetched and one used per
            # neuron, to allow arbitrary key and mask assignments
            key = keys_and_masks[0].key

        self.write_delay_parameters(spec, placement.x, placement.y,
                                    placement.p, subvertex, num_delay_blocks,
                                    delay_blocks, vertex_slice, key)
        # End-of-Spec:
        spec.end_specification()
        data_writer.close()
Example #29
    def generate_data_spec(
            self, subvertex, placement, partitioned_graph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        # Reserve memory:
        spec.comment("\nReserving memory space for data regions:\n\n")

        # ###################################################################
        # Reserve SDRAM space for memory areas:
        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
        n_words_per_stage = int(math.ceil(vertex_slice.n_atoms / 32.0))
        delay_params_sz = 4 * (_DELAY_PARAM_HEADER_WORDS +
                               (self._n_delay_stages * n_words_per_stage))

        spec.reserve_memory_region(
            region=(
                DelayExtensionPartitionedVertex.
                _DELAY_EXTENSION_REGIONS.SYSTEM.value),
            size=common_constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4,
            label='setup')

        spec.reserve_memory_region(
            region=(
                DelayExtensionPartitionedVertex.
                _DELAY_EXTENSION_REGIONS.DELAY_PARAMS.value),
            size=delay_params_sz, label='delay_params')

        subvertex.reserve_provenance_data_region(spec)

        self.write_setup_info(spec)

        spec.comment("\n*** Spec for Delay Extension Instance ***\n\n")

        key = None
        partitions = partitioned_graph.\
            outgoing_edges_partitions_from_vertex(subvertex)
        for partition in partitions.values():
            keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)

            # NOTE: the first key assigned is used as the key.  In future,
            # the full list of keys should be fetched and one used per
            # neuron, to allow arbitrary key and mask assignments
            key = keys_and_masks[0].key

        incoming_key = None
        incoming_mask = None
        incoming_edges = partitioned_graph.incoming_subedges_from_subvertex(
            subvertex)

        for incoming_edge in incoming_edges:
            incoming_slice = graph_mapper.get_subvertex_slice(
                incoming_edge.pre_subvertex)
            if (incoming_slice.lo_atom == vertex_slice.lo_atom and
                    incoming_slice.hi_atom == vertex_slice.hi_atom):
                partition = partitioned_graph.get_partition_of_subedge(
                    incoming_edge)
                keys_and_masks = \
                    routing_info.get_keys_and_masks_from_partition(partition)
                incoming_key = keys_and_masks[0].key
                incoming_mask = keys_and_masks[0].mask

        self.write_delay_parameters(
            spec, vertex_slice, key, incoming_key, incoming_mask)
        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename
Example #30
    def generate_data_spec(self, subvertex, placement, partitioned_graph,
                           graph, routing_info, hostname, graph_mapper,
                           report_folder, ip_tags, reverse_ip_tags,
                           write_text_specs, application_run_time_folder):

        # Create new DataSpec for this processor:
        data_writer, report_writer = self.get_data_spec_file_writers(
            placement.x, placement.y, placement.p, hostname, report_folder,
            write_text_specs, application_run_time_folder)
        spec = DataSpecificationGenerator(data_writer, report_writer)
        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self.model_name))
        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        # Get recording sizes - the order is important here, as spikes
        # require less space than voltage, and voltage less than gsyn.  This
        # order ensures that the buffer size before receive is optimal for
        # all recording channels
        # TODO: Maybe split the buffer size before receive by channel?
        spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        v_buffer_size = self._v_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        gsyn_buffer_size = self._gsyn_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        spike_history_sz = recording_utils.get_buffer_sizes(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        v_history_sz = recording_utils.get_buffer_sizes(
            self._v_buffer_max_size, v_buffer_size,
            self._enable_buffered_recording)
        gsyn_history_sz = recording_utils.get_buffer_sizes(
            self._gsyn_buffer_max_size, gsyn_buffer_size,
            self._enable_buffered_recording)
        spike_buffering_needed = recording_utils.needs_buffering(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        v_buffering_needed = recording_utils.needs_buffering(
            self._v_buffer_max_size, v_buffer_size,
            self._enable_buffered_recording)
        gsyn_buffering_needed = recording_utils.needs_buffering(
            self._gsyn_buffer_max_size, gsyn_buffer_size,
            self._enable_buffered_recording)
        buffer_size_before_receive = self._buffer_size_before_receive
        if (not spike_buffering_needed and not v_buffering_needed
                and not gsyn_buffering_needed):
            buffer_size_before_receive = max(
                (spike_history_sz, v_history_sz, gsyn_history_sz)) + 256

        # Reserve memory regions
        self._reserve_memory_regions(spec, vertex_slice, spike_history_sz,
                                     v_history_sz, gsyn_history_sz, subvertex)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key - use only the first edge
        key = None

        for partition in partitioned_graph.\
                outgoing_edges_partitions_from_vertex(subvertex).values():

            keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)

            # NOTE: the first key assigned is used as the key.  In future,
            # the full list of keys should be fetched and one used per
            # neuron, to allow arbitrary key and mask assignments
            key = keys_and_masks[0].key

        # Write the regions
        self._write_setup_info(spec, spike_history_sz, v_history_sz,
                               gsyn_history_sz, ip_tags,
                               buffer_size_before_receive,
                               self._time_between_requests, subvertex)
        self._write_neuron_parameters(spec, key, vertex_slice)

        # allow the synapse manager to write its part of the data
        # specification
        self._synapse_manager.write_data_spec(spec, self, vertex_slice,
                                              subvertex, placement,
                                              partitioned_graph, graph,
                                              routing_info, graph_mapper,
                                              self._input_type)

        # End the writing of this specification:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename
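The recording setup above asks, per channel, whether the data to be recorded
fits in the allowed buffer. A minimal sketch of the decision the
needs_buffering / get_buffer_sizes pair is assumed to make (hypothetical
stand-ins, not the real recording_utils API):

def needs_buffering(buffer_max_size, space_needed, enable_buffered):
    # Streaming back to host is only needed when buffered recording is
    # enabled and the data would overflow the allowed buffer
    return enable_buffered and space_needed > buffer_max_size

def get_buffer_sizes(buffer_max_size, space_needed, enable_buffered):
    # Reserve the capped buffer when streaming, else the full data size
    if needs_buffering(buffer_max_size, space_needed, enable_buffered):
        return buffer_max_size
    return space_needed

assert get_buffer_sizes(1024, 4096, True) == 1024  # streamed, capped
assert get_buffer_sizes(1024, 512, True) == 512    # fits as-is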