Example no. 1
0
    def test_retrieve_synaptic_block(self):
        """ Check that _retrieve_synaptic_block caches the block between
            reads, and that clearing the connection cache forces a re-read;
            row lengths must be stable across all three reads.
        """
        default_config_paths = os.path.join(
            os.path.dirname(abstract_spinnaker_common.__file__),
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME)

        config = conf_loader.load_config(
            AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)

        key = 0

        # manager built with mocked population table, synapse IO and
        # transceiver so no real machine data is needed
        synaptic_manager = SynapticManager(
            synapse_type=None, ring_buffer_sigma=5.0, spikes_per_second=100.0,
            config=config,
            population_table_type=MockMasterPopulationTable(
                {key: [(1, 0, False)]}),
            synapse_io=MockSynapseIO())

        transceiver = MockTransceiverRawData(bytearray(16))
        placement = Placement(None, 0, 0, 1)

        first_block, row_len_1 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=0, indirect_synapses_address=0,
            direct_synapses_address=0, key=key, n_rows=1, index=0,
            using_extra_monitor_cores=False)
        same_block, row_len_1_2 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=0, indirect_synapses_address=0,
            direct_synapses_address=0, key=key, n_rows=1, index=0,
            using_extra_monitor_cores=False)
        synaptic_manager.clear_connection_cache()
        different_block, row_len_2 = synaptic_manager._retrieve_synaptic_block(
            transceiver=transceiver, placement=placement,
            master_pop_table_address=0, indirect_synapses_address=0,
            direct_synapses_address=0, key=key, n_rows=1, index=0,
            using_extra_monitor_cores=False)

        # Check that the row lengths are all the same
        assert row_len_1 == row_len_1_2
        assert row_len_1 == row_len_2

        # Check that the block retrieved twice without clearing the cache is
        # the very same object (i.e. the second read hit the cache)
        assert id(first_block) == id(same_block)

        # Check that the block retrieved after clearing the cache is a new
        # object rather than the previously cached one
        assert id(first_block) != id(different_block)
Example no. 2
0
class AbstractPopulationVertex(
        ApplicationVertex, AbstractGeneratesDataSpecification,
        AbstractHasAssociatedBinary, AbstractContainsUnits,
        AbstractSpikeRecordable, AbstractNeuronRecordable,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractProvidesIncomingPartitionConstraints,
        AbstractPopulationInitializable, AbstractPopulationSettable,
        AbstractChangableAfterRun, AbstractHasGlobalMaxAtoms,
        AbstractRewritesDataSpecification, AbstractReadParametersBeforeSet,
        AbstractAcceptsIncomingSynapses, ProvidesKeyToAtomMappingImpl):
    """ Underlying vertex model for Neural Populations.

    Combines a neuron model, input type, synapse type, threshold type and
    optional additional input into one application vertex, and handles
    resource accounting, data specification generation and recording.
    """
    __slots__ = [
        "_additional_input", "_binary", "_buffer_size_before_receive",
        "_change_requires_mapping",
        "_change_requires_neuron_parameters_reload",
        "_incoming_spike_buffer_size", "_input_type",
        "_maximum_sdram_for_buffering", "_minimum_buffer_sdram", "_model_name",
        "_n_atoms", "_n_profile_samples", "_neuron_model", "_neuron_recorder",
        "_receive_buffer_host", "_receive_buffer_port", "_spike_recorder",
        "_synapse_manager", "_threshold_type", "_time_between_requests",
        "_units", "_using_auto_pause_and_resume"
    ]

    # baseline number of mallocs performed during data generation
    BASIC_MALLOC_USAGE = 2

    # recording region ids
    SPIKE_RECORDING_REGION = 0
    V_RECORDING_REGION = 1
    GSYN_EXCITATORY_RECORDING_REGION = 2
    GSYN_INHIBITORY_RECORDING_REGION = 3

    # map from recordable variable name to its recording region id
    RECORDING_REGION = {"spikes": 0, "v": 1, "gsyn_exc": 2, "gsyn_inh": 3}

    # map from recordable variable name to its human-readable long name
    VARIABLE_LONG = {
        "spikes": "spikes",
        "v": "membrane voltage",
        "gsyn_exc": "gsyn_excitatory",
        "gsyn_inh": "gsyn_inhibitory"
    }

    # total number of recording regions (spikes, v, gsyn_exc, gsyn_inh)
    N_RECORDING_REGIONS = 4

    # the size of the runtime SDP port data region
    RUNTIME_SDP_PORT_SIZE = 4

    # 6 elements before the start of global parameters
    BYTES_TILL_START_OF_GLOBAL_PARAMETERS = 24

    # count of all population vertices created so far (class-wide)
    _n_vertices = 0

    # defaults for constructor arguments that do not come from PyNN
    non_pynn_default_parameters = {
        'spikes_per_second': None,
        'ring_buffer_sigma': None,
        'incoming_spike_buffer_size': None,
        'constraints': None,
        'label': None
    }

    def __init__(self,
                 n_neurons,
                 binary,
                 label,
                 max_atoms_per_core,
                 spikes_per_second,
                 ring_buffer_sigma,
                 incoming_spike_buffer_size,
                 model_name,
                 neuron_model,
                 input_type,
                 synapse_type,
                 threshold_type,
                 additional_input=None,
                 constraints=None):
        """ Store the neuron component objects, read buffering and profiling
            settings from the simulator config, and build the recorder and
            synapse manager.

        :param n_neurons: number of neurons (atoms) in the population
        :param binary: name of the binary to run for this vertex
        :param label: label of the vertex
        :param max_atoms_per_core: maximum number of atoms per machine core
        :param spikes_per_second: passed through to the SynapticManager
        :param ring_buffer_sigma: passed through to the SynapticManager
        :param incoming_spike_buffer_size: incoming spike buffer size, or\
            None to read the value from the config
        :param model_name: name of the neuron model, used in reports
        :param neuron_model: the neuron model component
        :param input_type: the input type component
        :param synapse_type: the synapse type component
        :param threshold_type: the threshold type component
        :param additional_input: optional additional input component
        :param constraints: any placement constraints, or None
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        self._units = {
            'spikes': 'spikes',
            'v': 'mV',
            'gsyn_exc': "uS",
            'gsyn_inh': "uS"
        }

        self._binary = binary
        self._n_atoms = n_neurons

        # buffer data
        self._incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # fall back to the configured buffer size when none was given
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self._model_name = model_name
        self._neuron_model = neuron_model
        self._input_type = input_type
        self._threshold_type = threshold_type
        self._additional_input = additional_input

        # Set up for recording
        self._neuron_recorder = NeuronRecorder(
            ["spikes", "v", "gsyn_exc", "gsyn_inh"], n_neurons)

        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")

        # If live buffering is enabled, set a maximum on the buffer sizes
        spike_buffer_max_size = 0
        v_buffer_max_size = 0
        gsyn_buffer_max_size = 0
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            spike_buffer_max_size = config.getint("Buffers",
                                                  "spike_buffer_size")
            v_buffer_max_size = config.getint("Buffers", "v_buffer_size")
            gsyn_buffer_max_size = config.getint("Buffers", "gsyn_buffer_size")
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")

        # one entry per recording region, in region-id order: spikes, v,
        # gsyn_exc, gsyn_inh (the gsyn size is shared by both gsyn regions)
        self._maximum_sdram_for_buffering = [
            spike_buffer_max_size, v_buffer_max_size, gsyn_buffer_max_size,
            gsyn_buffer_max_size
        ]

        # Set up synapse handling
        self._synapse_manager = SynapticManager(synapse_type,
                                                ring_buffer_sigma,
                                                spikes_per_second, config)

        # bool for if state has changed.
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Set up for profiling
        self._n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")

    @property
    @overrides(ApplicationVertex.n_atoms)
    def n_atoms(self):
        """ The number of neurons (atoms) in this population. """
        return self._n_atoms

    @inject_items({
        "graph": "MemoryApplicationGraph",
        "n_machine_time_steps": "TotalMachineTimeSteps",
        "machine_time_step": "MachineTimeStep"
    })
    @overrides(ApplicationVertex.get_resources_used_by_atoms,
               additional_arguments={
                   "graph", "n_machine_time_steps", "machine_time_step"
               })
    def get_resources_used_by_atoms(self, vertex_slice, graph,
                                    n_machine_time_steps, machine_time_step):
        """ Compute the SDRAM, DTCM and CPU resources needed by a slice of
            atoms, including the extra resources needed for recording.

        :param vertex_slice: the slice of atoms to cost
        :param graph: the application graph (injected)
        :param n_machine_time_steps: total machine time steps (injected)
        :param machine_time_step: machine time step in microseconds (injected)
        :return: a ResourceContainer describing the required resources
        """
        # pylint: disable=arguments-differ

        # set resources required from this object
        container = ResourceContainer(
            sdram=SDRAMResource(
                self.get_sdram_usage_for_atoms(vertex_slice, graph,
                                               machine_time_step)),
            dtcm=DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice)),
            cpu_cycles=CPUCyclesPerTickResource(
                self.get_cpu_usage_for_atoms(vertex_slice)))

        # add the resources needed for buffered recording
        recording_sizes = recording_utilities.get_recording_region_sizes(
            self._get_buffered_sdram(vertex_slice, n_machine_time_steps),
            self._minimum_buffer_sdram, self._maximum_sdram_for_buffering,
            self._using_auto_pause_and_resume)
        container.extend(
            recording_utilities.get_recording_resources(
                recording_sizes, self._receive_buffer_host,
                self._receive_buffer_port))

        # return the total resources.
        return container

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        """ Whether changes since the last run require re-mapping. """
        return self._change_requires_mapping

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        """ Record that any outstanding changes have been handled. """
        self._change_requires_mapping = False

    def _get_buffered_sdram_per_timestep(self, vertex_slice):
        """ Per-timestep buffered SDRAM needed by each recording region, in
            the fixed region order: spikes, v, gsyn_exc, gsyn_inh.

        :param vertex_slice: the slice of atoms being recorded
        :return: a list of per-timestep SDRAM sizes, one per region
        """
        recorder = self._neuron_recorder
        return [
            recorder.get_buffered_sdram_per_timestep(variable, vertex_slice)
            for variable in ("spikes", "v", "gsyn_exc", "gsyn_inh")
        ]

    def _get_buffered_sdram(self, vertex_slice, n_machine_time_steps):
        """ Total buffered SDRAM needed by each recording region over the
            whole run, in the fixed region order: spikes, v, gsyn_exc,
            gsyn_inh.

        :param vertex_slice: the slice of atoms being recorded
        :param n_machine_time_steps: number of machine time steps in the run
        :return: a list of SDRAM sizes, one per region
        """
        recorder = self._neuron_recorder
        return [
            recorder.get_buffered_sdram(
                variable, vertex_slice, n_machine_time_steps)
            for variable in ("spikes", "v", "gsyn_exc", "gsyn_inh")
        ]

    @inject_items({"n_machine_time_steps": "TotalMachineTimeSteps"})
    @overrides(ApplicationVertex.create_machine_vertex,
               additional_arguments={"n_machine_time_steps"})
    def create_machine_vertex(self,
                              vertex_slice,
                              resources_required,
                              n_machine_time_steps,
                              label=None,
                              constraints=None):
        """ Create a PopulationMachineVertex for a slice of this vertex's
            atoms, and count it in the class-wide vertex total.

        :param vertex_slice: the slice of atoms for the machine vertex
        :param resources_required: the resources allocated to the vertex
        :param n_machine_time_steps: total machine time steps (injected)
        :param label: optional label for the machine vertex
        :param constraints: optional constraints for the machine vertex
        :return: the new machine vertex
        """
        # pylint: disable=too-many-arguments, arguments-differ
        is_recording = len(self._neuron_recorder.recording_variables) > 0
        buffered_sdram_per_timestep = self._get_buffered_sdram_per_timestep(
            vertex_slice)
        buffered_sdram = self._get_buffered_sdram(vertex_slice,
                                                  n_machine_time_steps)
        minimum_buffer_sdram = recording_utilities.get_minimum_buffer_sdram(
            buffered_sdram, self._minimum_buffer_sdram)
        overflow_sdram = self._neuron_recorder.get_sampling_overflow_sdram(
            vertex_slice)
        vertex = PopulationMachineVertex(resources_required, is_recording,
                                         minimum_buffer_sdram,
                                         buffered_sdram_per_timestep, label,
                                         constraints, overflow_sdram)

        # class-wide count; used e.g. as the random back-off range when
        # writing neuron parameters
        AbstractPopulationVertex._n_vertices += 1

        # return machine vertex
        return vertex

    def get_cpu_usage_for_atoms(self, vertex_slice):
        """ CPU cycles per timestep needed by a slice of atoms: a fixed base
            cost plus per-neuron costs from each neuron component, the
            recorder and the synapse manager.

        :param vertex_slice: the slice of atoms to cost
        :return: the number of CPU cycles required
        """
        per_neuron_cycles = (
            _NEURON_BASE_N_CPU_CYCLES_PER_NEURON +
            self._neuron_model.get_n_cpu_cycles_per_neuron() +
            self._input_type.get_n_cpu_cycles_per_neuron(
                self._synapse_manager.synapse_type.get_n_synapse_types()) +
            self._threshold_type.get_n_cpu_cycles_per_neuron())
        if self._additional_input is not None:
            per_neuron_cycles += \
                self._additional_input.get_n_cpu_cycles_per_neuron()
        return (_NEURON_BASE_N_CPU_CYCLES + _C_MAIN_BASE_N_CPU_CYCLES +
                (per_neuron_cycles * vertex_slice.n_atoms) +
                self._neuron_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
                self._synapse_manager.get_n_cpu_cycles())

    def get_dtcm_usage_for_atoms(self, vertex_slice):
        """ DTCM bytes needed by a slice of atoms: a fixed base cost plus
            per-neuron costs from each neuron component, the recorder and
            the synapse manager.

        :param vertex_slice: the slice of atoms to cost
        :return: the number of DTCM bytes required
        """
        per_neuron_usage = (
            self._neuron_model.get_dtcm_usage_per_neuron_in_bytes() +
            self._input_type.get_dtcm_usage_per_neuron_in_bytes() +
            self._threshold_type.get_dtcm_usage_per_neuron_in_bytes())
        if self._additional_input is not None:
            per_neuron_usage += \
                self._additional_input.get_dtcm_usage_per_neuron_in_bytes()
        return (_NEURON_BASE_DTCM_USAGE_IN_BYTES +
                (per_neuron_usage * vertex_slice.n_atoms) +
                self._neuron_recorder.get_dtcm_usage_in_bytes(vertex_slice) +
                self._synapse_manager.get_dtcm_usage_in_bytes())

    def _get_sdram_usage_for_neuron_params_per_neuron(self):
        """ SDRAM bytes per neuron in the neuron parameters region, summed
            over every neuron component (input type, threshold type,
            recorder, neuron model, and additional input when present).

        :return: the per-neuron SDRAM usage in bytes
        """
        components = [
            self._input_type, self._threshold_type, self._neuron_recorder,
            self._neuron_model]
        if self._additional_input is not None:
            components.append(self._additional_input)
        return sum(
            component.get_sdram_usage_per_neuron_in_bytes()
            for component in components)

    def _get_sdram_usage_for_neuron_params(self, vertex_slice):
        """ Calculate the SDRAM usage for just the neuron parameters region:
            a fixed header, the global parameters of the neuron model and
            recorder, and the per-neuron parameters for the slice.

        :param vertex_slice: the slice of atoms.
        :return: the SDRAM required for the neuron parameters region
        """
        per_neuron_usage = \
            self._get_sdram_usage_for_neuron_params_per_neuron()
        return (self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS +
                self._neuron_model.
                get_sdram_usage_for_global_parameters_in_bytes() +
                self._neuron_recorder.
                get_sdram_usage_for_global_parameters_in_bytes() +
                (per_neuron_usage * vertex_slice.n_atoms))

    def get_sdram_usage_for_atoms(self, vertex_slice, graph,
                                  machine_time_step):
        """ Total SDRAM bytes needed by a slice of atoms: system region,
            neuron parameters, recording header, provenance, synapses,
            malloc overhead and profiling.

        :param vertex_slice: the slice of atoms to cost
        :param graph: the application graph, used to find incoming edges
        :param machine_time_step: the machine time step in microseconds
        :return: the number of SDRAM bytes required
        """
        sdram_requirement = (
            common_constants.SYSTEM_BYTES_REQUIREMENT +
            self._get_sdram_usage_for_neuron_params(vertex_slice) +
            recording_utilities.get_recording_header_size(
                self.N_RECORDING_REGIONS) +
            PopulationMachineVertex.get_provenance_data_size(
                PopulationMachineVertex.N_ADDITIONAL_PROVENANCE_DATA_ITEMS) +
            self._synapse_manager.get_sdram_usage_in_bytes(
                vertex_slice, graph.get_edges_ending_at_vertex(self),
                machine_time_step) +
            (self._get_number_of_mallocs_used_by_dsg() *
             common_constants.SARK_PER_MALLOC_SDRAM_USAGE) +
            profile_utils.get_profile_region_size(self._n_profile_samples))

        return sdram_requirement

    def _get_number_of_mallocs_used_by_dsg(self):
        """ Number of mallocs performed during data generation: a fixed
            base, the synapse manager's mallocs, and one per recorded
            variable.

        :return: the total number of mallocs
        """
        # one extra malloc for each variable currently being recorded
        n_recording_mallocs = len(self._neuron_recorder.recording_variables)
        total = self.BASIC_MALLOC_USAGE
        total += self._synapse_manager.get_number_of_mallocs_used_by_dsg()
        total += n_recording_mallocs
        return total

    def _reserve_memory_regions(self, spec, vertex_slice, vertex):
        """ Reserve the SDRAM regions used by this vertex: system, neuron
            parameters, recording header, profiling and provenance.

        :param spec: the data specification to reserve regions in
        :param vertex_slice: the slice of atoms for the machine vertex
        :param vertex: the machine vertex (reserves its provenance region)
        :return: None
        """

        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.SYSTEM.value,
            size=common_constants.SYSTEM_BYTES_REQUIREMENT,
            label='System')

        self._reserve_neuron_params_data_region(spec, vertex_slice)

        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.RECORDING.value,
            size=recording_utilities.get_recording_header_size(
                self.N_RECORDING_REGIONS))

        profile_utils.reserve_profile_region(
            spec, constants.POPULATION_BASED_REGIONS.PROFILING.value,
            self._n_profile_samples)

        vertex.reserve_provenance_data_region(spec)

    def _reserve_neuron_params_data_region(self, spec, vertex_slice):
        """ Reserve the neuron parameters data region, sized for the given
            slice of atoms.

        :param spec: the spec to write the dsg region to
        :param vertex_slice: the slice of atoms from the application vertex
        :return: None
        """
        params_size = self._get_sdram_usage_for_neuron_params(vertex_slice)
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            size=params_size,
            label='NeuronParams')

    def _write_neuron_parameters(self, spec, key, vertex_slice,
                                 machine_time_step, time_scale_factor):
        """ Write the neuron parameters region for one machine vertex.

        NOTE(review): the write order below is significant — it presumably
        must match the order in which the binary reads the region; confirm
        before reordering any writes.

        :param spec: the data specification to write to
        :param key: the routing key for this vertex, or None if none
        :param vertex_slice: the slice of atoms covered by the vertex
        :param machine_time_step: the machine time step in microseconds
        :param time_scale_factor: the time scale factor
        """
        # pylint: disable=too-many-arguments
        n_atoms = (vertex_slice.hi_atom - vertex_slice.lo_atom) + 1
        spec.comment(
            "\nWriting Neuron Parameters for {} Neurons:\n".format(n_atoms))

        # Set the focus to the memory region 2 (neuron parameters):
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)

        # Write the random back off value
        spec.write_value(
            random.randint(0, AbstractPopulationVertex._n_vertices))

        # Write the number of microseconds between sending spikes
        time_between_spikes = ((machine_time_step * time_scale_factor) /
                               (n_atoms * 2.0))
        spec.write_value(data=int(time_between_spikes))

        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)

        # Write the size of the incoming spike buffer
        spec.write_value(data=self._incoming_spike_buffer_size)

        # Write the recording rates and sizes
        record_globals = self._neuron_recorder.get_global_parameters(
            vertex_slice)
        for param in record_globals:
            spec.write_value(data=param.get_value(),
                             data_type=param.get_dataspec_datatype())

        # Write the index parameters
        # NOTE(review): 'slice_paramaters' matches the (apparently
        # misspelled) keyword of write_parameters_per_neuron; renaming it
        # here alone would break the call
        indexes = self._neuron_recorder.get_index_parameters(vertex_slice)
        utility_calls.write_parameters_per_neuron(spec,
                                                  vertex_slice,
                                                  indexes,
                                                  slice_paramaters=True)

        # Write the global parameters
        global_params = self._neuron_model.get_global_parameters()
        for param in global_params:
            spec.write_value(data=param.get_value(),
                             data_type=param.get_dataspec_datatype())

        # Write the neuron parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice, self._neuron_model.get_neural_parameters())

        # Write the input type parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice, self._input_type.get_input_type_parameters())

        # Write the additional input parameters
        if self._additional_input is not None:
            utility_calls.write_parameters_per_neuron(
                spec, vertex_slice, self._additional_input.get_parameters())

        # Write the threshold type parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice,
            self._threshold_type.get_threshold_parameters())

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "graph_mapper": "MemoryGraphMapper",
        "routing_info": "MemoryRoutingInfos"
    })
    @overrides(AbstractRewritesDataSpecification.regenerate_data_specification,
               additional_arguments={
                   "machine_time_step", "time_scale_factor", "graph_mapper",
                   "routing_info"
               })
    def regenerate_data_specification(self, spec, placement, machine_time_step,
                                      time_scale_factor, graph_mapper,
                                      routing_info):
        """ Regenerate just the reloadable regions (neuron parameters and
            the synapse manager's regions) after parameters have changed.

        :param spec: the data specification to write to
        :param placement: the placement of the machine vertex
        :param machine_time_step: machine time step in microseconds (injected)
        :param time_scale_factor: the time scale factor (injected)
        :param graph_mapper: maps machine vertices to slices (injected)
        :param routing_info: provides routing keys (injected)
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex_slice = graph_mapper.get_slice(placement.vertex)

        # reserve the neuron parameters data region
        self._reserve_neuron_params_data_region(
            spec, graph_mapper.get_slice(placement.vertex))

        # write the neuron params into the new dsg region
        self._write_neuron_parameters(
            key=routing_info.get_first_key_from_pre_vertex(
                placement.vertex, constants.SPIKE_PARTITION_ID),
            machine_time_step=machine_time_step,
            spec=spec,
            time_scale_factor=time_scale_factor,
            vertex_slice=vertex_slice)

        self._synapse_manager.regenerate_data_specification(
            spec, placement, machine_time_step, time_scale_factor,
            vertex_slice)

        # close spec
        spec.end_specification()

    @overrides(AbstractRewritesDataSpecification.
               requires_memory_regions_to_be_reloaded)
    def requires_memory_regions_to_be_reloaded(self):
        """ Whether neuron parameters changed and need reloading. """
        return self._change_requires_neuron_parameters_reload

    @overrides(AbstractRewritesDataSpecification.mark_regions_reloaded)
    def mark_regions_reloaded(self):
        """ Record that the changed regions have been reloaded. """
        self._change_requires_neuron_parameters_reload = False

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "graph_mapper": "MemoryGraphMapper",
        "application_graph": "MemoryApplicationGraph",
        "machine_graph": "MemoryMachineGraph",
        "routing_info": "MemoryRoutingInfos",
        "tags": "MemoryTags",
        "n_machine_time_steps": "TotalMachineTimeSteps"
    })
    @overrides(AbstractGeneratesDataSpecification.generate_data_specification,
               additional_arguments={
                   "machine_time_step", "time_scale_factor", "graph_mapper",
                   "application_graph", "machine_graph", "routing_info",
                   "tags", "n_machine_time_steps"
               })
    def generate_data_specification(self, spec, placement, machine_time_step,
                                    time_scale_factor, graph_mapper,
                                    application_graph, machine_graph,
                                    routing_info, tags, n_machine_time_steps):
        """ Generate the full data specification for one machine vertex:
            reserve all regions, then write the system, recording, neuron
            parameter, profiling and synaptic data.

        :param spec: the data specification to write to
        :param placement: the placement of the machine vertex
        :param machine_time_step: machine time step in microseconds (injected)
        :param time_scale_factor: the time scale factor (injected)
        :param graph_mapper: maps machine vertices to slices (injected)
        :param application_graph: the application graph (injected)
        :param machine_graph: the machine graph (injected)
        :param routing_info: provides routing keys (injected)
        :param tags: provides the IP tags for recording (injected)
        :param n_machine_time_steps: total machine time steps (injected)
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex = placement.vertex

        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self._model_name))
        vertex_slice = graph_mapper.get_slice(vertex)

        # Reserve memory regions
        self._reserve_memory_regions(spec, vertex_slice, vertex)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key
        key = routing_info.get_first_key_from_pre_vertex(
            vertex, constants.SPIKE_PARTITION_ID)

        # Write the setup region
        spec.switch_write_focus(
            constants.POPULATION_BASED_REGIONS.SYSTEM.value)
        spec.write_array(
            simulation_utilities.get_simulation_header_array(
                self.get_binary_file_name(), machine_time_step,
                time_scale_factor))

        # Write the recording region
        spec.switch_write_focus(
            constants.POPULATION_BASED_REGIONS.RECORDING.value)
        ip_tags = tags.get_ip_tags_for_vertex(vertex)
        recorded_region_sizes = recording_utilities.get_recorded_region_sizes(
            self._get_buffered_sdram(vertex_slice, n_machine_time_steps),
            self._maximum_sdram_for_buffering)
        spec.write_array(
            recording_utilities.get_recording_header_array(
                recorded_region_sizes, self._time_between_requests,
                self._buffer_size_before_receive, ip_tags))

        # Write the neuron parameters
        self._write_neuron_parameters(spec, key, vertex_slice,
                                      machine_time_step, time_scale_factor)

        # write profile data
        profile_utils.write_profile_region_data(
            spec, constants.POPULATION_BASED_REGIONS.PROFILING.value,
            self._n_profile_samples)

        # allow the synaptic matrix to write its data spec-able data
        self._synapse_manager.write_data_spec(spec, self, vertex_slice, vertex,
                                              placement, machine_graph,
                                              application_graph, routing_info,
                                              graph_mapper, self._input_type,
                                              machine_time_step)

        # End the writing of this specification:
        spec.end_specification()

    @overrides(AbstractHasAssociatedBinary.get_binary_file_name)
    def get_binary_file_name(self):
        """ The binary name with the synapse manager's executable suffix
            inserted between the file title and its extension.
        """
        # insert the suffix just before the extension
        root, extension = os.path.splitext(self._binary)
        suffix = self._synapse_manager.vertex_executable_suffix
        return root + suffix + extension

    @overrides(AbstractHasAssociatedBinary.get_binary_start_type)
    def get_binary_start_type(self):
        """ The start type of the binary (uses the simulation interface). """
        return ExecutableType.USES_SIMULATION_INTERFACE

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        """ Whether spikes are currently being recorded. """
        return self._neuron_recorder.is_recording("spikes")

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(self,
                             new_state=True,
                             sampling_interval=None,
                             indexes=None):
        """ Turn spike recording on or off; delegates to set_recording.

        :param new_state: True to record spikes, False to stop
        :param sampling_interval: optional sampling interval
        :param indexes: optional indexes of neurons to record
        """
        self.set_recording("spikes", new_state, sampling_interval, indexes)

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(self, placements, graph_mapper, buffer_manager,
                   machine_time_step):
        """ Retrieve the recorded spikes from the spike recording region.

        :param placements: the placements of the machine vertices
        :param graph_mapper: maps machine vertices to slices
        :param buffer_manager: the buffer manager holding recorded data
        :param machine_time_step: the machine time step in microseconds
        :return: the recorded spikes, as returned by the neuron recorder
        """
        return self._neuron_recorder.get_spikes(self.label, buffer_manager,
                                                self.SPIKE_RECORDING_REGION,
                                                placements, graph_mapper, self,
                                                machine_time_step)

    @overrides(AbstractNeuronRecordable.get_recordable_variables)
    def get_recordable_variables(self):
        """ The variables this vertex can record. """
        return self._neuron_recorder.get_recordable_variables()

    @overrides(AbstractNeuronRecordable.is_recording)
    def is_recording(self, variable):
        """ Whether the given variable is currently being recorded. """
        return self._neuron_recorder.is_recording(variable)

    @overrides(AbstractNeuronRecordable.set_recording)
    def set_recording(self,
                      variable,
                      new_state=True,
                      sampling_interval=None,
                      indexes=None):
        """ Turn recording of a variable on or off.

        :param variable: the name of the variable to record
        :param new_state: True to record, False to stop
        :param sampling_interval: optional sampling interval
        :param indexes: optional indexes of neurons to record
        """
        # re-mapping is needed if this variable was not already recorded
        self._change_requires_mapping = not self.is_recording(variable)
        self._neuron_recorder.set_recording(variable, new_state,
                                            sampling_interval, indexes)

    @overrides(AbstractNeuronRecordable.get_data)
    def get_data(self, variable, n_machine_time_steps, placements,
                 graph_mapper, buffer_manager, machine_time_step):
        """ Retrieve the recorded data for a variable from its recording
            region.

        :param variable: the name of the recorded variable
        :param n_machine_time_steps: total machine time steps in the run
        :param placements: the placements of the machine vertices
        :param graph_mapper: maps machine vertices to slices
        :param buffer_manager: the buffer manager holding recorded data
        :param machine_time_step: the machine time step in microseconds
        :return: the recorded matrix data from the neuron recorder
        """
        # pylint: disable=too-many-arguments
        return self._neuron_recorder.get_matrix_data(
            self.label, buffer_manager, self.RECORDING_REGION[variable],
            placements, graph_mapper, self, variable, n_machine_time_steps)

    @overrides(AbstractNeuronRecordable.get_neuron_sampling_interval)
    def get_neuron_sampling_interval(self, variable):
        """ The sampling interval currently set for a recorded variable. """
        return self._neuron_recorder.get_neuron_sampling_interval(variable)

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        """ The sampling interval currently set for spike recording. """
        return self._neuron_recorder.get_neuron_sampling_interval("spikes")

    @overrides(AbstractPopulationInitializable.initialize)
    def initialize(self, variable, value):
        """ Initialise a state variable of the neuron model by calling its
            initialize_<variable> method, and flag the parameters for
            reloading.

        :param variable: the name of the variable to initialise
        :param value: the value to initialise it to
        :raises Exception: if the neuron model has no matching\
            initialize_<variable> method
        """
        initialize_attr = getattr(self._neuron_model,
                                  "initialize_%s" % variable, None)
        if initialize_attr is None or not callable(initialize_attr):
            raise Exception("Vertex does not support initialisation of"
                            " parameter {}".format(variable))
        initialize_attr(value)
        self._change_requires_neuron_parameters_reload = True

    @property
    def input_type(self):
        """ The input type component of this vertex. """
        return self._input_type

    @overrides(AbstractPopulationSettable.get_value)
    def get_value(self, key):
        """ Get a property of the overall model

        Searches each neuron component in turn (and finally this vertex
        itself) and returns the first attribute found with the given name.

        :param key: the name of the property to read
        :raises Exception: if no component has the property
        """
        candidates = (
            self._neuron_model, self._input_type, self._threshold_type,
            self._synapse_manager.synapse_type, self._additional_input,
            self)
        for candidate in candidates:
            if hasattr(candidate, key):
                return getattr(candidate, key)
        raise Exception("Population {} does not have parameter {}".format(
            self._model_name, key))

    @overrides(AbstractPopulationSettable.set_value)
    def set_value(self, key, value):
        """ Set a property of the overall model

        Searches each neuron component in turn and sets the attribute on
        the first component that has it, flagging the parameters for
        reloading.

        :param key: the name of the property to set
        :param value: the value to set it to
        :raises InvalidParameterType: if no component has the property
        """
        for obj in [
                self._neuron_model, self._input_type, self._threshold_type,
                self._synapse_manager.synapse_type, self._additional_input
        ]:
            if hasattr(obj, key):
                setattr(obj, key, value)
                self._change_requires_neuron_parameters_reload = True
                return
        raise InvalidParameterType("Type {} does not have parameter {}".format(
            type(self), key))

    @overrides(AbstractReadParametersBeforeSet.read_parameters_from_machine)
    def read_parameters_from_machine(self, transceiver, placement,
                                     vertex_slice):
        """ Read the current neuron parameter values back from SDRAM on\
            the machine and update the Python-side components with them.

        The translation order below (global, neural, input, additional\
        input, threshold, then synapses) must match the order in which\
        the parameters were written out; each translate call advances\
        ``offset`` through the byte array.

        :param transceiver: the transceiver to read machine memory with
        :param placement: the placement of the vertex to read from
        :param vertex_slice: the slice of atoms covered by that vertex
        :rtype: None
        """

        # locate sdram address to where the neuron parameters are stored
        neuron_region_sdram_address = \
            helpful_functions.locate_memory_region_for_placement(
                placement,
                constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
                transceiver)

        # shift past the extra stuff before neuron parameters that we don't
        # need to read
        neuron_parameters_sdram_address = (
            neuron_region_sdram_address +
            self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS)

        # get size of neuron params
        size_of_region = self._get_sdram_usage_for_neuron_params(vertex_slice)
        size_of_region -= self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS

        # get data from the machine
        byte_array = transceiver.read_memory(placement.x, placement.y,
                                             neuron_parameters_sdram_address,
                                             size_of_region)

        # Skip the recorder globals, as these are not changed on the machine;
        # they are just written out in case data is changed and written back
        offset = self._neuron_recorder.get_size_of_global_parameters(
            vertex_slice)

        # update python neuron parameters with the data

        # handle global params (only once, so given a slice of 0 to 0)
        global_params, offset = utility_calls.translate_parameters(
            self._neuron_model.get_global_parameter_types(), byte_array,
            offset, Slice(0, 0))
        self._neuron_model.set_global_parameters(global_params)

        # handle state params for a neuron
        neuron_params, offset = utility_calls.translate_parameters(
            self._neuron_model.get_neural_parameter_types(), byte_array,
            offset, vertex_slice)
        self._neuron_model.set_neural_parameters(neuron_params, vertex_slice)

        # handle input params
        input_params, offset = utility_calls.translate_parameters(
            self._input_type.get_input_type_parameter_types(), byte_array,
            offset, vertex_slice)
        self._input_type.set_input_type_parameters(input_params, vertex_slice)

        # handle additional input params, if they exist
        if self._additional_input is not None:
            additional_params, offset = utility_calls.translate_parameters(
                self._additional_input.get_parameter_types(), byte_array,
                offset, vertex_slice)
            self._additional_input.set_parameters(additional_params,
                                                  vertex_slice)

        # handle threshold type params
        threshold_params, offset = utility_calls.translate_parameters(
            self._threshold_type.get_threshold_parameter_types(), byte_array,
            offset, vertex_slice)
        self._threshold_type.set_threshold_parameters(threshold_params,
                                                      vertex_slice)

        # Read synapse parameters
        self._synapse_manager.read_parameters_from_machine(
            transceiver, placement, vertex_slice)

    @property
    def weight_scale(self):
        """ The global weight scale, taken from the input type. """
        return self._input_type.get_global_weight_scale()

    @property
    def ring_buffer_sigma(self):
        """ The ring buffer sigma, held by the synapse manager. """
        return self._synapse_manager.ring_buffer_sigma

    @ring_buffer_sigma.setter
    def ring_buffer_sigma(self, ring_buffer_sigma):
        # delegated straight to the synapse manager
        self._synapse_manager.ring_buffer_sigma = ring_buffer_sigma

    @property
    def spikes_per_second(self):
        """ The expected spike rate, held by the synapse manager. """
        return self._synapse_manager.spikes_per_second

    @spikes_per_second.setter
    def spikes_per_second(self, spikes_per_second):
        # delegated straight to the synapse manager
        self._synapse_manager.spikes_per_second = spikes_per_second

    @property
    def synapse_dynamics(self):
        """ The synapse dynamics, held by the synapse manager. """
        return self._synapse_manager.synapse_dynamics

    def set_synapse_dynamics(self, synapse_dynamics):
        """ Set the synapse dynamics on the synapse manager. """
        self._synapse_manager.synapse_dynamics = synapse_dynamics

    def add_pre_run_connection_holder(self, connection_holder, edge,
                                      synapse_info):
        """ Delegate holding of pre-run connection data to the synapse\
            manager. """
        # pylint: disable=arguments-differ
        self._synapse_manager.add_pre_run_connection_holder(
            connection_holder, edge, synapse_info)

    @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine)
    def get_connections_from_machine(
            self,
            transceiver,
            placement,
            edge,
            graph_mapper,
            routing_infos,
            synapse_information,
            machine_time_step,
            using_extra_monitor_cores,
            placements=None,
            data_receiver=None,
            sender_extra_monitor_core_placement=None,
            extra_monitor_cores_for_router_timeout=None,
            handle_time_out_configuration=True,
            fixed_routes=None):
        """ Read the synaptic connections back from the machine;\
            delegated wholesale to the synapse manager. """
        # pylint: disable=too-many-arguments
        return self._synapse_manager.get_connections_from_machine(
            transceiver, placement, edge, graph_mapper, routing_infos,
            synapse_information, machine_time_step, using_extra_monitor_cores,
            placements, data_receiver, sender_extra_monitor_core_placement,
            extra_monitor_cores_for_router_timeout,
            handle_time_out_configuration, fixed_routes)

    def clear_connection_cache(self):
        """ Clear the synapse manager's cached connection data. """
        self._synapse_manager.clear_connection_cache()

    @property
    def synapse_type(self):
        """ The synapse type component, held by the synapse manager. """
        return self._synapse_manager.synapse_type

    def get_maximum_delay_supported_in_ms(self, machine_time_step):
        """ The maximum synaptic delay supported, in milliseconds. """
        return self._synapse_manager.get_maximum_delay_supported_in_ms(
            machine_time_step)

    @overrides(AbstractProvidesIncomingPartitionConstraints.
               get_incoming_partition_constraints)
    def get_incoming_partition_constraints(self, partition):
        """ Gets the constraints for partitions going into this vertex

        :param partition: partition that goes into this vertex\
            (unused here; the same constraints apply to every partition)
        :return: list of constraints
        """
        return self._synapse_manager.get_incoming_partition_constraints()

    @overrides(AbstractProvidesOutgoingPartitionConstraints.
               get_outgoing_partition_constraints)
    def get_outgoing_partition_constraints(self, partition):
        """ Gets the constraints for partitions going out of this vertex

        :param partition: the partition that leaves this vertex (unused;\
            every outgoing partition gets the same constraint)
        :return: list of constraints (a single contiguous-key-range\
            constraint)
        """
        return [ContiguousKeyRangeContraint()]

    @overrides(AbstractNeuronRecordable.clear_recording)
    def clear_recording(self, variable, buffer_manager, placements,
                        graph_mapper):
        """ Clear the recorded data for the given variable's region. """
        self._clear_recording_region(buffer_manager, placements, graph_mapper,
                                     self.RECORDING_REGION[variable])

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        """ Clear the recorded spike data. """
        self._clear_recording_region(
            buffer_manager, placements, graph_mapper,
            AbstractPopulationVertex.SPIKE_RECORDING_REGION)

    def _clear_recording_region(self, buffer_manager, placements, graph_mapper,
                                recording_region_id):
        """ clears a recorded data region from the buffer manager

        :param buffer_manager: the buffer manager object
        :param placements: the placements object
        :param graph_mapper: the graph mapper object
        :param recording_region_id: the recorded region id for clearing
        :rtype: None
        """
        machine_vertices = graph_mapper.get_machine_vertices(self)
        for machine_vertex in machine_vertices:
            placement = placements.get_placement_of_vertex(machine_vertex)
            buffer_manager.clear_recorded_data(placement.x, placement.y,
                                               placement.p,
                                               recording_region_id)

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        """ Get the units of a variable, searching the model components\
            first and this vertex's own units table second.

        :param variable: the name of the variable
        :return: the units string
        :raises InvalidParameterType: if nothing knows the variable
        """
        components = (
            self._neuron_model, self._input_type, self._threshold_type,
            self._synapse_manager.synapse_type, self._additional_input)
        for component in components:
            if hasattr(component, variable) and isinstance(
                    component, AbstractContainsUnits):
                return component.unit(variable)
        # if not in the components, must be within myself, so use my own units
        if variable not in self._units:
            raise InvalidParameterType(
                "The parameter {} does not exist in this input "
                "conductance component".format(variable))
        return self._units[variable]

    def describe(self):
        """
        Returns a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template
        together with an associated template engine
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template context
        will be returned.
        """
        parameters = dict()
        for parameter_name in self.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": self._model_name,
            "default_parameters": self.default_parameters,
            "default_initial_values": self.default_parameters,
            "parameters": parameters,
        }
        return context

    def __str__(self):
        """ Human-readable summary: label and atom count. """
        return "{} with {} atoms".format(self.label, self.n_atoms)

    def __repr__(self):
        """ Same representation as ``__str__``. """
        return self.__str__()
# (extraction artifact: the following non-Python text was inserted between
# code sections by the scraper — "Ejemplo n.º 3" / "0" — commented out so
# that the file parses)
class SpiNNakEarApplicationVertex(
        ApplicationVertex, AbstractAcceptsIncomingSynapses,
        SimplePopulationSettable, HandOverToVertex, AbstractChangableAfterRun,
        AbstractSpikeRecordable, AbstractNeuronRecordable,
        AbstractControlsDestinationOfEdges, AbstractControlsSourceOfEdges,
        AbstractSendsOutgoingSynapses, AbstractCanReset, AbstractContainsUnits,
        AbstractApplicationSupportsAutoPauseAndResume,
        AbstractProvidesNKeysForPartition):
    """ Application vertex for the SpiNNak-Ear model.

    Builds the OME / DRNL / IHCAN / aggregation-tree machine vertices\
    and answers recording, synaptic and partitioning queries on their\
    behalf.
    """

    __slots__ = [
        # pynn model
        '_model',
        # bool flag for neuron param changes
        '_remapping_required',
        # ihcan vertices
        "_ihcan_vertices",
        # drnl vertices
        "_drnl_vertices",
        # final agg verts (outgoing atoms)
        "_final_agg_vertices",
        # storing synapse dynamics
        "_synapse_dynamics",
        # fibres per inner hair cell (lsr + msr + hsr counts)
        "_n_fibres_per_ihc",
        # the seed for the inner hair fibre
        "_ihcan_fibre_random_seed",
        # the number of columns / rows for aggregation tree
        "_n_group_tree_rows",
        # the synaptic manager to manage projections into drnl verts.
        "__synapse_manager",
        # the number of drnls there are.
        "_n_dnrls",
        # the number of agg verts which are final aggregation verts.
        "_n_final_agg_groups",
        # the pole frequencies
        "_pole_freqs",
        # The timer period for the fast components
        "_timer_period"
    ]
    # NOTE(review): __init__ also assigns attributes not listed above
    # (_profile, _n_channels, _n_fibres_per_ihcan_core, the recorders and
    # the _change_requires_* flags); this only works if at least one base
    # class supplies a __dict__ — confirm this is intended.

    # NOTES IHC = inner hair cell
    #       IHCan =  inner hair channel
    #       DRNL = middle ear filter
    #       OME ear fluid

    # error message for frequency
    FREQUENCY_ERROR = (
        "The input sampling frequency is too high for the chosen simulation "
        "time scale. Please reduce Fs or increase the time scale factor in "
        "the config file")

    # error message for get units
    GET_UNIT_ERROR = "do not know what to do with variable {} for get units"

    # error message if getting source outside aggregation verts
    PRE_SLICE_ERROR = (
        "Why are you asking for a source outside of aggregation verts?!")

    # error message if getting destination verts outside drnls.
    POST_SLICE_ERROR = (
        "why you asking for a destination atoms outside of the drnl verts!?")

    # error for processing plastic synapses
    PLASTIC_SYNAPSE_ERROR = (
        "The SpiNNaear cannot handle plastic synapses at the moment, "
        "complain to the SpiNNaker software team if this is a problem.")

    # error message for being asked to clear recording of a param we dont know
    CLEAR_RECORDING_ERROR = "Spinnakear does not support recording of {}"

    # error message for set recording of a variable we dont know about
    # (same text as CLEAR_RECORDING_ERROR — presumably intentional)
    RECORDING_ERROR = "Spinnakear does not support recording of {}"

    # error message for sampling interval
    SAMPLING_INTERVAL_ERROR = "do not know how to handle variable {}"

    # error message for incorrect neurons map
    N_NEURON_ERROR = (
        "the number of neurons {} and the number of atoms  {} do not match")

    # app edge mc partition id
    MC_APP_EDGE_PARTITION_ID = "internal_mc"

    # app edge sdram partition id
    SDRAM_APP_EDGE_PARTITION_ID = "internal_sdram"

    # green wood function from https://en.wikipedia.org/wiki/Greenwood_function
    # constant below and mapped to variable names

    # green wood constants for human cochlea hearing frequency mapping
    # A is a scaling constant between the characteristic frequency and the
    # upper frequency limit of the species
    GREEN_WOOD_HUMAN_CONSTANT_A = 165.4

    # a is the slope of the straight-line portion of the frequency-position
    # curve, which has shown to be conserved throughout all investigated
    # species after scaling the length of the cochlea
    GREEN_WOOD_HUMAN_CONSTANT_ALPHA = 2.1

    # K is a constant of integration that represents the divergence from the
    # log nature of the curve and is determined by the lower frequency
    # audible limit in the species.
    GREEN_WOOD_HUMAN_CONSTANT_K = 0.88

    # n recording regions
    _N_POPULATION_RECORDING_REGIONS = 1

    # cap on atoms in the final aggregation row, and the maximum allowed
    # ratio of sampling frequency to time scale factor
    _FINAL_ROW_N_ATOMS = 256
    MAX_TIME_SCALE_FACTOR_RATIO = 22050

    # flags for sorting out random fibres. might be a enum
    HSR_FLAG = 2
    MSR_FLAG = 1
    LSR_FLAG = 0

    # how many synapse types this binary supports
    N_SYNAPSE_TYPES = 2

    # these curve values are built from profiling the IHCAN cores to deduce
    # performance.
    CURVE_ONE = 18.12
    CURVE_TWO = 10.99

    # max audio frequency supported
    DEFAULT_MAX_AUDIO_FREQUENCY = 20000

    # biggest number of neurons for the ear model
    FULL_EAR_HAIR_FIBERS = 30000.0

    # min audio frequency supported
    DEFAULT_MIN_AUDIO_FREQUENCY = 30

    def __init__(self, n_neurons, constraints, label, model, profile,
                 time_scale_factor):
        """ Build the SpiNNak-Ear application vertex.

        :param n_neurons: the number of neurons the caller expects;\
            checked against the computed atom count
        :param constraints: placement constraints
        :param label: the vertex label
        :param model: the pynn model holding the ear parameters
        :param profile: whether to enable profiling on machine vertices
        :param time_scale_factor: the simulation time scale factor
        :raises ConfigurationException: if n_neurons does not match the\
            computed number of atoms
        :raises Exception: if fs is too high for the time scale factor
        """
        # Superclasses
        ApplicationVertex.__init__(self, label, constraints)
        AbstractAcceptsIncomingSynapses.__init__(self)
        SimplePopulationSettable.__init__(self)
        HandOverToVertex.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        AbstractSpikeRecordable.__init__(self)
        AbstractNeuronRecordable.__init__(self)
        AbstractProvidesNKeysForPartition.__init__(self)

        self._model = model
        self._profile = profile
        self._remapping_required = True
        self._synapse_dynamics = None
        self._n_fibres_per_ihc = None
        self._n_group_tree_rows = None
        self._ihcan_vertices = list()
        self._drnl_vertices = list()
        self._final_agg_vertices = list()
        self.__synapse_manager = SynapticManager(
            self.N_SYNAPSE_TYPES, None, None,
            globals_variables.get_simulator().config)

        # time per audio sample; used to work out how many fibres fit on
        # each ihcan core (presumably seconds — TODO confirm units)
        sample_time = time_scale_factor / self._model.fs

        # how many channels
        self._n_channels = int(self.get_out_going_size() /
                               self._model.n_fibres_per_ihc)

        # process pole freqs
        self._pole_freqs = self._process_pole_freqs()

        # how many fibres / atoms ran on each ihcan core
        self._n_fibres_per_ihcan_core = self.fibres_per_ihcan_core(
            sample_time, self._model.n_fibres_per_ihc)

        # process all the other internal numbers
        atoms_per_row = self.process_internal_numbers()

        # read in param file if needed
        self._process_param_file(atoms_per_row)

        # recording stuff
        self._drnl_neuron_recorder = NeuronRecorder(
            DRNLMachineVertex.RECORDABLES,
            DRNLMachineVertex.get_matrix_scalar_data_types(),
            DRNLMachineVertex.get_matrix_output_data_types(), self._n_dnrls)

        self._ihcan_neuron_recorder = NeuronRecorder(
            IHCANMachineVertex.RECORDABLES,
            IHCANMachineVertex.get_matrix_scalar_data_types(),
            IHCANMachineVertex.get_matrix_output_data_types(),
            self._n_dnrls * self._n_fibres_per_ihc)

        # bool for if state has changed.
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False
        self._change_requires_data_generation = False
        self._has_reset_last = True

        # safety check
        if self._n_atoms != n_neurons:
            raise ConfigurationException(
                self.N_NEURON_ERROR.format(n_neurons, self._n_atoms))

        # safety stuff
        if (self._model.fs / time_scale_factor >
                self.MAX_TIME_SCALE_FACTOR_RATIO):
            raise Exception(self.FREQUENCY_ERROR)

        # write timer period
        self._timer_period = (MICRO_TO_SECOND_CONVERSION *
                              (self._model.seq_size / self._model.fs))

    @overrides(AbstractProvidesNKeysForPartition.get_n_keys_for_partition)
    def get_n_keys_for_partition(self, partition, graph_mapper):
        """ Delegate the n-keys query to the partition's pre-vertex. """
        return partition.pre_vertex.get_n_keys_for_partition(
            partition, graph_mapper)

    @overrides(AbstractNeuronRecordable.get_expected_n_rows)
    def get_expected_n_rows(self, current_run_timesteps_map, sampling_rate,
                            vertex, variable):
        """ The expected number of recording rows for a run, which\
            depends on whether the vertex is a DRNL or an IHCAN machine\
            vertex. """
        if isinstance(vertex, DRNLMachineVertex):
            rows = self._drnl_neuron_recorder.expected_rows_for_a_run_time(
                current_run_timesteps_map, vertex, sampling_rate)
            # DRNL recordings produce seq_size entries per recorder row
            return int(rows * self._model.seq_size)
        rows = self._ihcan_neuron_recorder.expected_rows_for_a_run_time(
            current_run_timesteps_map, vertex, sampling_rate)
        return int(rows)

    @staticmethod
    def fibres_per_ihcan_core(sample_time, n_fibres_per_ihc):
        """ Work out how many fibres / atoms can run on each ihcan core\
            from the profiled performance curve, capped at 2. """
        sample_time_scaled = sample_time * MICRO_TO_SECOND_CONVERSION
        max_possible = abs(int(math.floor(
            (sample_time_scaled - SpiNNakEarApplicationVertex.CURVE_ONE) /
            SpiNNakEarApplicationVertex.CURVE_TWO)))
        return min(n_fibres_per_ihc, max_possible, 2)

    @overrides(AbstractAcceptsIncomingSynapses.gen_on_machine)
    def gen_on_machine(self, vertex_slice):
        """ Whether synapses for the given slice are generated on the\
            machine; delegated to the synaptic manager. """
        return self.__synapse_manager.gen_on_machine(vertex_slice)

    @overrides(AbstractApplicationSupportsAutoPauseAndResume.
               my_variable_local_time_period)
    def my_variable_local_time_period(self, default_machine_time_step,
                                      variable):
        """ The local timer period for a recordable variable: MOC uses\
            the default machine time step; everything else uses the\
            ear's own timer period. """
        if variable != DRNLMachineVertex.MOC:
            return self._timer_period
        return default_machine_time_step

    def reset_to_first_timestep(self):
        """ Reset state ready to run again from the first timestep.

        Marks that a reset has been done, and flags data regeneration\
        when the synapse dynamics can change while a simulation runs.
        :rtype: None
        """
        # Mark that reset has been done, and reload state variables
        self._has_reset_last = True
        self._change_requires_neuron_parameters_reload = False

        # If synapses change during the run, the data needs regenerating.
        # BUG FIX: this previously read ``self._synapse_manager``, which is
        # never assigned on this class (the manager lives in the
        # ``__synapse_manager`` slot, as used by gen_on_machine), so the
        # attribute access always raised AttributeError.
        dynamics = self.__synapse_manager.synapse_dynamics
        if dynamics is not None and dynamics.changes_during_run:
            self._change_requires_data_generation = True

    def get_units(self, variable):
        """ Get the units for a recordable variable from whichever\
            machine vertex type records it.

        :param variable: the name of the recordable variable
        :return: the units string
        :raises Exception: if no vertex type records the variable
        """
        for vertex_type in (DRNLMachineVertex, IHCANMachineVertex):
            if variable in vertex_type.RECORDABLES:
                return vertex_type.RECORDABLE_UNITS[variable]
        raise Exception(self.GET_UNIT_ERROR.format(variable))

    def _process_param_file(self, atoms_per_row):
        """ Load pre-generated structural parameters from the model's\
            param file when one is given and loadable; otherwise compute\
            them, and (when a param file path was given) save them back.

        :param atoms_per_row: atoms per aggregation-tree row
        :rtype: None
        """
        if self._model.param_file is not None:
            try:
                pre_gen_vars = numpy.load(self._model.param_file)
                self._n_atoms = pre_gen_vars['n_atoms']
                self._mv_index_list = pre_gen_vars['mv_index_list']
                self._parent_index_list = pre_gen_vars['parent_index_list']
                self._edge_index_list = pre_gen_vars['edge_index_list']
                self._ihc_seeds = pre_gen_vars['ihc_seeds']
                self._ome_indices = pre_gen_vars['ome_indices']
                return
            except Exception:
                # file missing or malformed: fall through and recompute
                # (kept deliberately broad, matching the original code)
                pass

        # compute the structural numbers (previously duplicated in both
        # the except branch and the else branch)
        self._n_atoms, self._n_dnrls, self._n_final_agg_groups = \
            self.calculate_n_atoms_for_each_vertex_type(
                atoms_per_row, self._n_channels,
                self._model.n_fibres_per_ihc, self._model.seq_size)

        if self._model.param_file is not None:
            # save fixed param file for next time
            self._save_pre_gen_vars(self._model.param_file)

    def process_internal_numbers(self):
        """ Compute the internal sizing numbers for the ear.

        Sets ``_n_fibres_per_ihc`` and ``_n_group_tree_rows``, and\
        returns the number of atoms per aggregation-tree row (or the\
        fibres per ihcan core when the tree has no rows).

        :return: atoms per row of the aggregation tree
        """

        # ear hair frequency bits in total per inner ear channel
        self._n_fibres_per_ihc = (self._model.n_lsr_per_ihc +
                                  self._model.n_msr_per_ihc +
                                  self._model.n_hsr_per_ihc)

        # number of columns needed for the aggregation tree
        atoms_per_row = self.calculate_atoms_per_row(
            self._n_channels, self._n_fibres_per_ihc,
            self._n_fibres_per_ihcan_core,
            self._model.max_input_to_aggregation_group)

        # if no rows, then just add 1 row with 1 vertex.
        if atoms_per_row == 0:
            self._n_group_tree_rows = 1
            return self._n_fibres_per_ihcan_core
        else:

            # figure how many atoms per aggregation element per row
            # (geometric growth by the max inputs per aggregation group)
            max_n_atoms_per_group_tree_row = (
                (self._model.max_input_to_aggregation_group**numpy.arange(
                    1, atoms_per_row + 1)) * self._n_fibres_per_ihcan_core)

            # filter rows max atoms so that its capped at 256
            # (_FINAL_ROW_N_ATOMS) or the total fibre count if smaller
            max_n_atoms_per_group_tree_row = \
                max_n_atoms_per_group_tree_row[
                    max_n_atoms_per_group_tree_row <= min(
                        self._FINAL_ROW_N_ATOMS,
                        self._n_channels * self._n_fibres_per_ihc)]

            self._n_group_tree_rows = max_n_atoms_per_group_tree_row.size
        return atoms_per_row

    def _process_pole_freqs(self):
        if self._model.pole_freqs is None:
            if self._model.fs > 2 * self.DEFAULT_MAX_AUDIO_FREQUENCY:  # use
                # the greenwood mapping
                pole_freqs = (numpy.flipud([
                    self.GREEN_WOOD_HUMAN_CONSTANT_A *
                    (10**(self.GREEN_WOOD_HUMAN_CONSTANT_ALPHA *
                          numpy.linspace([0], [1], self._n_channels)) -
                     self.GREEN_WOOD_HUMAN_CONSTANT_K)
                ]))

            # don't want alias frequencies so we use a capped log scale map
            else:
                max_power = min([
                    numpy.log10(self.fs / 2.),
                    numpy.log10(self.DEFAULT_MAX_AUDIO_FREQUENCY)
                ])
                pole_freqs = numpy.flipud(
                    numpy.logspace(
                        numpy.log10(self.DEFAULT_MIN_AUDIO_FREQUENCY),
                        max_power, self._n_channels))
        else:
            pole_freqs = self._model.pole_freqs
        return pole_freqs[0]

    @overrides(AbstractSendsOutgoingSynapses.get_out_going_size)
    def get_out_going_size(self):
        """ The total number of outgoing atoms: the scaled fibre count\
            rounded down to a whole number of inner hair cells. """
        scaled_fibres = self.FULL_EAR_HAIR_FIBERS * float(self._model.scale)
        n_ihcs = int(scaled_fibres / self._model.n_fibres_per_ihc)
        return n_ihcs * self._model.n_fibres_per_ihc

    @overrides(AbstractControlsSourceOfEdges.get_out_going_slices)
    def get_out_going_slices(self):
        """ The outgoing atom slices: one connection slice per final-row\
            aggregation vertex.

        :return: list of connection slices
        """
        # the ``starter`` accumulator previously built alongside this list
        # was never used, so it has been removed
        return [agg_vertex.connection_slice
                for agg_vertex in self._final_agg_vertices]

    @overrides(AbstractControlsDestinationOfEdges.get_in_coming_slices)
    def get_in_coming_slices(self):
        """ The incoming atom slices: one single-atom slice per DRNL\
            vertex. """
        return [
            Slice(index, index)
            for index, _ in enumerate(self._drnl_vertices)]

    @overrides(AbstractControlsSourceOfEdges.get_pre_slice_for)
    def get_pre_slice_for(self, machine_vertex):
        """ The pre-vertex slice; only valid for final-row aggregation\
            vertices. """
        is_final_agg_vertex = (
            isinstance(machine_vertex, ANGroupMachineVertex)
            and machine_vertex.is_final_row)
        if not is_final_agg_vertex:
            raise Exception(self.PRE_SLICE_ERROR)
        return machine_vertex.connection_slice

    @overrides(AbstractControlsDestinationOfEdges.get_post_slice_for)
    def get_post_slice_for(self, machine_vertex):
        """ The post-vertex slice; only valid for DRNL vertices, each of\
            which covers a single atom. """
        if not isinstance(machine_vertex, DRNLMachineVertex):
            raise Exception(self.POST_SLICE_ERROR)
        index = machine_vertex.drnl_index
        return Slice(index, index)

    @overrides(AbstractAcceptsIncomingSynapses.get_in_coming_size)
    def get_in_coming_size(self):
        """ Number of incoming atoms: one per DRNL vertex. """
        return self._n_dnrls

    @overrides(AbstractAcceptsIncomingSynapses.get_synapse_id_by_target)
    def get_synapse_id_by_target(self, target):
        """ Map a synapse target name to its synapse type id\
            (excitatory = 0, inhibitory = 1), or None if unrecognised. """
        synapse_ids = {"excitatory": 0, "inhibitory": 1}
        return synapse_ids.get(target)

    @overrides(
        AbstractControlsDestinationOfEdges.get_destinations_for_edge_from)
    def get_destinations_for_edge_from(self, app_edge, partition_id,
                                       graph_mapper,
                                       original_source_machine_vertex):
        """ The DRNL vertices receive every edge coming into this vertex\
            from anywhere other than the OME vertex. """
        is_incoming_edge = (
            app_edge.pre_vertex != self and app_edge.post_vertex == self)
        comes_from_ome = isinstance(
            original_source_machine_vertex, OMEMachineVertex)
        if is_incoming_edge and not comes_from_ome:
            return self._drnl_vertices
        return []

    @overrides(AbstractControlsSourceOfEdges.get_sources_for_edge_from)
    def get_sources_for_edge_from(self, app_edge, partition_id, graph_mapper,
                                  original_source_machine_vertex):
        """ Outgoing edges are sourced only from final-row aggregation\
            vertices. """
        is_outgoing_edge = (
            app_edge.pre_vertex == self and app_edge.post_vertex != self)
        is_final_agg_vertex = (
            isinstance(original_source_machine_vertex, ANGroupMachineVertex)
            and original_source_machine_vertex.is_final_row)
        if is_outgoing_edge and is_final_agg_vertex:
            return [original_source_machine_vertex]
        return []

    @overrides(
        AbstractAcceptsIncomingSynapses.get_maximum_delay_supported_in_ms)
    def get_maximum_delay_supported_in_ms(self, default_machine_time_step):
        """ The maximum synaptic delay supported, in milliseconds. """
        return self.__synapse_manager.get_maximum_delay_supported_in_ms(
            default_machine_time_step)

    @overrides(AbstractAcceptsIncomingSynapses.add_pre_run_connection_holder)
    def add_pre_run_connection_holder(self, connection_holder, projection_edge,
                                      synapse_information):
        """ Delegate holding of pre-run connection data to the synapse\
            manager. """
        self.__synapse_manager.add_pre_run_connection_holder(
            connection_holder, projection_edge, synapse_information)

    def _save_pre_gen_vars(self, file_path):
        """ saves params into a numpy file.
        :param file_path: path to file to store stuff into
        :rtype: None
        """
        numpy.savez_compressed(file_path,
                               n_atoms=self._n_atoms,
                               mv_index_list=self._mv_index_list,
                               parent_index_list=self._parent_index_list,
                               edge_index_list=self._edge_index_list,
                               ihc_seeds=self._ihc_seeds,
                               ome_indices=self._ome_indices)

    @overrides(AbstractAcceptsIncomingSynapses.set_synapse_dynamics)
    def set_synapse_dynamics(self, synapse_dynamics):
        """ Store the synapse dynamics; only static synapses are\
            supported by this binary. """
        if not isinstance(synapse_dynamics, SynapseDynamicsStatic):
            raise Exception(self.PLASTIC_SYNAPSE_ERROR)
        self._synapse_dynamics = synapse_dynamics

    @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine)
    def get_connections_from_machine(self,
                                     transceiver,
                                     placement,
                                     edge,
                                     graph_mapper,
                                     routing_infos,
                                     synapse_information,
                                     local_time_step_map,
                                     using_extra_monitor_cores,
                                     placements=None,
                                     monitor_api=None,
                                     monitor_placement=None,
                                     monitor_cores=None,
                                     handle_time_out_configuration=True,
                                     fixed_routes=None):
        """ Read synaptic connections back from the machine, pointing\
            the synapse manager at the DRNL binary's region ids. """
        return self.__synapse_manager.get_connections_from_machine(
            transceiver, placement, edge, graph_mapper, routing_infos,
            synapse_information, local_time_step_map,
            using_extra_monitor_cores,
            DRNLMachineVertex.REGIONS.POPULATION_TABLE.value,
            DRNLMachineVertex.REGIONS.SYNAPTIC_MATRIX.value,
            DRNLMachineVertex.REGIONS.DIRECT_MATRIX.value, placements,
            monitor_api, monitor_placement, monitor_cores,
            handle_time_out_configuration, fixed_routes)

    @overrides(AbstractAcceptsIncomingSynapses.clear_connection_cache)
    def clear_connection_cache(self):
        """ Clear the synapse manager's cached connection data. """
        self.__synapse_manager.clear_connection_cache()

    @overrides(SimplePopulationSettable.set_value)
    def set_value(self, key, value):
        """ Set a parameter value and flag that remapping is required. """
        SimplePopulationSettable.set_value(self, key, value)
        self._remapping_required = True

    def describe(self):
        """ Returns a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template\
        context will be returned.
        """
        parameters = {
            name: self.get_value(name)
            for name in self._model.default_parameters}
        return {
            "name": self._model.model_name,
            "default_parameters": self._model.default_parameters,
            "default_initial_values": self._model.default_parameters,
            "parameters": parameters,
        }

    @overrides(AbstractPopulationSettable.get_value)
    def get_value(self, key):
        """ Get a property of the model.

        :param key: the name of the model parameter
        :return: the parameter's value
        :raises Exception: if the model has no such parameter
        """
        try:
            return getattr(self._model, key)
        except AttributeError:
            raise Exception(
                "Population {} does not have parameter {}".format(self, key))

    def _add_to_graph_components(self, machine_graph, graph_mapper, slice,
                                 vertex, resource_tracker):
        """ adds the vertex to all the graph components and resources

        :param machine_graph: machine graph
        :param graph_mapper: graph mapper
        :param slice: slice
        :param vertex: machien vertex
        :param resource_tracker: resource tracker
        :rtype: None
        """

        machine_graph.add_vertex(vertex)
        graph_mapper.add_vertex_mapping(vertex, slice, self)
        resource_tracker.allocate_constrained_resources(
            vertex.resources_required, vertex.constraints)

    def _build_ome_vertex(self, machine_graph, graph_mapper, lo_atom,
                          resource_tracker, timer_period):
        """ builds the ome vertex

        :param machine_graph: machine graph
        :param graph_mapper: graph mapper
        :param lo_atom: lo atom to put into graph mapper slice
        :param resource_tracker: the resource tracker
        :param timer_period: the timer period for all machine verts based on\
        the ear vertex
        :return: the ome vertex and the new low atom
        """
        # a single OME vertex takes the raw audio input for all channels
        ome_vertex = OMEMachineVertex(
            self._model.audio_input, self._model.fs, self._n_channels,
            self._model.seq_size, timer_period, self._profile)

        # allocate resources and update the graphs
        self._add_to_graph_components(
            machine_graph, graph_mapper, Slice(lo_atom, lo_atom), ome_vertex,
            resource_tracker)
        return ome_vertex, lo_atom + 1

    def _build_drnl_verts(self, machine_graph, graph_mapper, new_low_atom,
                          resource_tracker, ome_vertex, timer_period):
        """ Create one DRNL machine vertex per channel and register each.

        :param machine_graph: machine graph to add vertices to
        :param graph_mapper: graph mapper to record slice mappings in
        :param new_low_atom: the current low atom count for the graph mapper
        :param resource_tracker: the resource tracker for placement
        :param ome_vertex: the ome vertex to read data sizes from
        :param timer_period: the timer period for all machine verts based on\
        the ear vertex
        :return: the updated low atom count
        """
        for pole_index in range(self._n_channels):
            vertex = DRNLMachineVertex(
                self._pole_freqs[pole_index], self._model.fs,
                ome_vertex.n_data_points, pole_index, self._profile,
                self._model.seq_size, self.__synapse_manager, self,
                self._model.n_buffers_in_sdram_total,
                self._drnl_neuron_recorder, timer_period)
            # each DRNL occupies a single atom
            self._add_to_graph_components(
                machine_graph, graph_mapper,
                Slice(new_low_atom, new_low_atom), vertex, resource_tracker)
            new_low_atom += 1
            self._drnl_vertices.append(vertex)
        return new_low_atom

    def _build_edges_between_ome_drnls(self, ome_vertex, machine_graph,
                                       app_edge, graph_mapper):
        """ Wire a machine edge from the OME vertex to every DRNL vertex.

        :param ome_vertex: the ome vertex the edges start at
        :param machine_graph: the machine graph the edges are added to
        :param app_edge: the app edge every machine edge maps back to
        :param graph_mapper: the graph mapper recording the edge mappings
        :rtype: None
        """
        for drnl_vertex in self._drnl_vertices:
            machine_edge = SpiNNakEarMachineEdge(ome_vertex, drnl_vertex)
            machine_graph.add_edge(machine_edge, ome_vertex.OME_PARTITION_ID)
            graph_mapper.add_edge_mapping(machine_edge, app_edge)

    def _build_ihcan_vertices_and_sdram_edges(self, machine_graph,
                                              graph_mapper, new_low_atom,
                                              resource_tracker, app_edge,
                                              sdram_app_edge, timer_period):
        """ builds the ihcan verts and adds edges from drnl to them

        :param machine_graph: machine graph
        :param graph_mapper: the graph mapper
        :param new_low_atom: the lo atom used to keep the graph mapper happy
        :param resource_tracker: the resource tracker for placement
        :param app_edge: the app edge to link all mc machine edges to
        :param sdram_app_edge: the application sdram edge between drnl and \
        inchan to link all sdram machine edges to.
        :param timer_period: the timer period for all machine verts based on\
        the ear vertex
        :return: tuple of (list of ihcan verts, updated low atom count)
        """

        ihcans = list()

        # generate ihc seeds: draw, without replacement, enough distinct
        # 32-bit values for every IHCAN vertex's seed block, reproducibly
        # from the model's seed
        n_ihcans = self._n_channels * self._model.n_fibres_per_ihc
        seed_index = 0
        random_range = numpy.arange(
            n_ihcans * IHCANMachineVertex.N_SEEDS_PER_IHCAN_VERTEX,
            dtype=numpy.uint32)
        numpy.random.seed(self._model.ihc_seeds_seed)
        ihc_seeds = numpy.random.choice(
            random_range,
            int(n_ihcans * IHCANMachineVertex.N_SEEDS_PER_IHCAN_VERTEX),
            replace=False)

        ihcan_recording_index = 0

        for drnl_vertex in self._drnl_vertices:
            # one SDRAM partition per DRNL, carrying its output to its IHCANs
            machine_graph.add_outgoing_edge_partition(
                ConstantSDRAMMachinePartition(
                    drnl_vertex.DRNL_SDRAM_PARTITION_ID, drnl_vertex,
                    "sdram edge between drnl vertex {} and its "
                    "IHCANS".format(drnl_vertex.drnl_index)))

            # build this channel's pool of fibre type flags
            # (high / medium / low spontaneous rate)
            fibres = []
            for _ in range(self._model.n_hsr_per_ihc):
                fibres.append(self.HSR_FLAG)
            for __ in range(self._model.n_msr_per_ihc):
                fibres.append(self.MSR_FLAG)
            for ___ in range(self._model.n_lsr_per_ihc):
                fibres.append(self.LSR_FLAG)

            # NOTE(review): re-seeding inside the loop means every DRNL's
            # fibre pool is shuffled identically — confirm this is intended
            random.seed(self._model.ihcan_fibre_random_seed)
            random.shuffle(fibres)

            for _ in range(
                    int(self._model.n_fibres_per_ihc /
                        self._n_fibres_per_ihcan_core)):

                # randomly pick fibre types
                chosen_indices = [
                    fibres.pop() for _ in range(self._n_fibres_per_ihcan_core)
                ]

                # each IHCAN covers (fibres per core * seq size) atoms
                ihcan_slice = Slice(
                    new_low_atom, new_low_atom +
                    (self._n_fibres_per_ihcan_core * self._model.seq_size) - 1)

                # separate index space for recording slices
                ihcan_recording_slice = Slice(
                    ihcan_recording_index, ihcan_recording_index +
                    (self._n_fibres_per_ihcan_core * self._model.seq_size) - 1)
                ihcan_recording_index += ihcan_recording_slice.n_atoms

                vertex = IHCANMachineVertex(
                    self._model.resample_factor,
                    ihc_seeds[seed_index:seed_index +
                              IHCANMachineVertex.N_SEEDS_PER_IHCAN_VERTEX],
                    self._n_fibres_per_ihcan_core, self._model.ear_index,
                    self._profile, self._model.fs,
                    chosen_indices.count(self.LSR_FLAG),
                    chosen_indices.count(self.MSR_FLAG),
                    chosen_indices.count(self.HSR_FLAG),
                    self._model.n_buffers_in_sdram_total, self._model.seq_size,
                    self._ihcan_neuron_recorder, ihcan_recording_slice,
                    timer_period)

                # update indexes
                new_low_atom += ihcan_slice.n_atoms
                seed_index += IHCANMachineVertex.N_SEEDS_PER_IHCAN_VERTEX

                # add to list of ihcans
                ihcans.append(vertex)

                self._add_to_graph_components(machine_graph, graph_mapper,
                                              ihcan_slice, vertex,
                                              resource_tracker)

                # multicast
                mc_edge = SpiNNakEarMachineEdge(drnl_vertex, vertex,
                                                EdgeTrafficType.MULTICAST)
                machine_graph.add_edge(mc_edge, drnl_vertex.DRNL_PARTITION_ID)
                graph_mapper.add_edge_mapping(mc_edge, app_edge)

                # sdram edge
                sdram_edge = SDRAMMachineEdge(
                    drnl_vertex, vertex, drnl_vertex.sdram_edge_size,
                    "sdram between {} and {}".format(drnl_vertex, vertex))
                machine_graph.add_edge(sdram_edge,
                                       drnl_vertex.DRNL_SDRAM_PARTITION_ID)
                graph_mapper.add_edge_mapping(sdram_edge, sdram_app_edge)
        return ihcans, new_low_atom

    def _build_aggregation_group_vertices_and_edges(self, machine_graph,
                                                    graph_mapper, new_low_atom,
                                                    resource_tracker,
                                                    app_edge):
        """ builds the tree of aggregation group vertices, row by row, and\
            the edges from each row's children into it

        :param machine_graph: machine graph
        :param graph_mapper: graph mapper
        :param new_low_atom: the current low atom count for the graph mapper
        :param resource_tracker: the resource tracker for placement
        :param app_edge: the app edge to link all mc machine edges to
        :rtype: None
        """
        to_process = list()
        to_process.extend(self._ihcan_vertices)
        n_child_per_group = self._model.max_input_to_aggregation_group

        for row in range(self._n_group_tree_rows):
            aggregation_verts = list()
            n_row_angs = int(
                numpy.ceil(float(len(to_process)) / n_child_per_group))

            # BUGFIX: this accumulator was previously reset to 0 at the top
            # of the "an" loop below, so the "+= n_atoms" never took effect
            # and every final-row vertex got a slice starting at atom 0; it
            # must accumulate across the whole row.
            final_row_lo_atom = 0
            for an in range(n_row_angs):
                child_verts = to_process[an * n_child_per_group:an *
                                         n_child_per_group + n_child_per_group]

                # deduce n atoms of the ag node
                n_atoms = 0
                for child in child_verts:
                    n_atoms += child.n_atoms

                # build slice for an node
                an_slice = Slice(new_low_atom, new_low_atom + n_atoms - 1)
                new_low_atom += n_atoms

                # build vert
                final_row = row == self._n_group_tree_rows - 1

                final_row_slice = None
                if final_row:
                    # NOTE(review): other slices in this file end at
                    # "+ n_atoms - 1"; the missing "- 1" here may be an
                    # off-by-one — confirm against ANGroupMachineVertex usage
                    final_row_slice = Slice(final_row_lo_atom,
                                            final_row_lo_atom + n_atoms)
                    final_row_lo_atom += n_atoms

                ag_vertex = ANGroupMachineVertex(n_atoms, len(child_verts),
                                                 final_row, row,
                                                 self._model.ear_index,
                                                 final_row_slice)

                # only store it in the agg array if its in the final row
                if final_row:
                    self._final_agg_vertices.append(ag_vertex)

                # store for the next cycle
                aggregation_verts.append(ag_vertex)

                # update graphs, mapper and resources
                self._add_to_graph_components(machine_graph, graph_mapper,
                                              an_slice, ag_vertex,
                                              resource_tracker)

                # add edges
                for child_vert in child_verts:
                    # sort out partition id
                    partition_id = IHCANMachineVertex.IHCAN_PARTITION_ID
                    if isinstance(child_vert, ANGroupMachineVertex):
                        partition_id = \
                            ANGroupMachineVertex.AN_GROUP_PARTITION_IDENTIFIER

                    # add edge and mapping
                    mc_edge = SpiNNakEarMachineEdge(child_vert, ag_vertex)
                    machine_graph.add_edge(mc_edge, partition_id)
                    graph_mapper.add_edge_mapping(mc_edge, app_edge)

            to_process = aggregation_verts

    @inject_items({"application_graph": "MemoryApplicationGraph"})
    @overrides(HandOverToVertex.create_and_add_to_graphs_and_resources,
               additional_arguments={"application_graph"})
    def create_and_add_to_graphs_and_resources(self, resource_tracker,
                                               machine_graph, graph_mapper,
                                               application_graph):
        """ builds the whole ear machine graph in order: OME, then DRNLs,\
            then edges between them, then IHCANs, then the aggregation tree

        :param resource_tracker: the resource tracker for placement
        :param machine_graph: the machine graph being built
        :param graph_mapper: graph mapper linking machine and app vertices
        :param application_graph: the application graph (injected)
        :rtype: None
        """

        # app-level edges the per-stage builders map machine edges back to
        mc_app_edge = ApplicationEdge(self, self)
        sdram_app_edge = ApplicationEdge(self, self, EdgeTrafficType.SDRAM)
        application_graph.add_edge(mc_app_edge, self.MC_APP_EDGE_PARTITION_ID)
        application_graph.add_edge(sdram_app_edge,
                                   self.SDRAM_APP_EDGE_PARTITION_ID)

        # atom tracker, threaded through each build stage
        current_atom_count = 0

        timer_period = (MICRO_TO_SECOND_CONVERSION * self._model.seq_size /
                        self._model.fs)

        # ome vertex
        ome_vertex, current_atom_count = self._build_ome_vertex(
            machine_graph, graph_mapper, current_atom_count, resource_tracker,
            timer_period)

        # handle the drnl verts
        current_atom_count = self._build_drnl_verts(machine_graph,
                                                    graph_mapper,
                                                    current_atom_count,
                                                    resource_tracker,
                                                    ome_vertex, timer_period)

        # handle edges between ome and drnls
        self._build_edges_between_ome_drnls(ome_vertex, machine_graph,
                                            mc_app_edge, graph_mapper)

        # build the ihcan verts.
        self._ihcan_vertices, current_atom_count = (
            self._build_ihcan_vertices_and_sdram_edges(
                machine_graph, graph_mapper, current_atom_count,
                resource_tracker, mc_app_edge, sdram_app_edge, timer_period))

        # build aggregation group verts and edges
        self._build_aggregation_group_vertices_and_edges(
            machine_graph, graph_mapper, current_atom_count, resource_tracker,
            mc_app_edge)

    @property
    @overrides(ApplicationVertex.n_atoms)
    def n_atoms(self):
        """ The total number of atoms this application vertex covers. """
        return self._n_atoms

    @staticmethod
    def calculate_n_atoms_for_each_vertex_type(n_group_tree_rows, n_channels,
                                               n_ihc, seq_size):
        # ome atom
        n_atoms = 1

        # dnrl atoms
        n_atoms += n_channels

        # ihcan atoms
        n_angs = n_channels * n_ihc
        n_atoms += (n_angs * seq_size)

        # an group atoms
        for row_index in range(n_group_tree_rows):
            n_atoms += n_angs

        return n_atoms, n_channels, n_angs

    @staticmethod
    def calculate_atoms_per_row(n_channels, n_fibres_per_ihc,
                                n_fibres_per_ihcan,
                                max_input_to_aggregation_group):
        return math.ceil(
            numpy.ceil(
                math.log((n_channels * n_fibres_per_ihc) / n_fibres_per_ihcan,
                         max_input_to_aggregation_group)))

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        """ Record that the vertex no longer needs remapping. """
        self._remapping_required = False

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        """ Whether this vertex has changed since the last mapping run. """
        return self._remapping_required

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        """ True when the IHCAN recorder is set to record spikes. """
        recorder = self._ihcan_neuron_recorder
        return recorder.is_recording(IHCANMachineVertex.SPIKES)

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self, graph_mapper,
                                     local_time_period_map):
        """ Get the sampling interval used when recording spikes.

        Falls back to the simulator's default machine time step when no
        mapping data is available yet.
        """
        have_mapping = (
            graph_mapper is not None and local_time_period_map is not None)
        if have_mapping:
            return self._ihcan_neuron_recorder.get_neuron_sampling_interval(
                IHCANMachineVertex.SPIKES, self._ihcan_vertices[0],
                local_time_period_map)
        return self.my_variable_local_time_period(
            get_simulator().default_machine_time_step,
            IHCANMachineVertex.SPIKES)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        """ Wipe recorded spike data from every IHCAN vertex's core. """
        for vertex in self._ihcan_vertices:
            spot = placements.get_placement_of_vertex(vertex)
            buffer_manager.clear_recorded_data(
                spot.x, spot.y, spot.p,
                IHCANMachineVertex.SPIKE_RECORDING_REGION_ID)

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(self, placements, graph_mapper, buffer_manager,
                   local_timer_period_map):
        """ Pull the recorded spikes off the machine. """
        region = (IHCANMachineVertex.RECORDING_REGIONS.
                  SPIKE_RECORDING_REGION_ID.value)
        return self._ihcan_neuron_recorder.get_spikes(
            self._label, buffer_manager, region, placements, graph_mapper,
            self, local_timer_period_map)

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(self,
                             default_machine_time_step,
                             new_state=True,
                             sampling_interval=None,
                             indexes=None):
        """ Turn spike recording on or off for the IHCAN vertices.

        NOTE(review): ``default_machine_time_step`` is ignored here and
        ``self._timer_period`` is forwarded to ``set_recording`` in its
        place — confirm this is intentional.
        """
        self.set_recording(IHCANMachineVertex.SPIKES, self._timer_period,
                           new_state, sampling_interval, indexes)

    @overrides(AbstractNeuronRecordable.get_recordable_variables)
    def get_recordable_variables(self):
        """ List the variables this vertex can record.

        The DRNL spike variable is deliberately left out: there is only one
        API for spikes, and the DRNL spikes exist purely to work around
        recording limitations.
        """
        return [DRNLMachineVertex.MOC] + list(IHCANMachineVertex.RECORDABLES)

    @overrides(AbstractSpikeRecordable.get_spike_machine_vertices)
    def get_spike_machine_vertices(self, graph_mapper):
        """ The machine vertices that produce recordable spikes. """
        return self._ihcan_vertices

    @overrides(AbstractNeuronRecordable.get_machine_vertices_for)
    def get_machine_vertices_for(self, variable, graph_mapper):
        """ Map a recordable variable to the machine verts recording it. """
        if variable == DRNLMachineVertex.MOC:
            return self._drnl_vertices
        return self._ihcan_vertices

    @overrides(AbstractNeuronRecordable.clear_recording)
    def clear_recording(self, variable, buffer_manager, placements,
                        graph_mapper):
        """ Wipe recorded data for a given variable from the machine.

        :param variable: the recorded variable to clear
        :param buffer_manager: buffer manager holding the recorded data
        :param placements: placements of the machine vertices
        :param graph_mapper: the graph mapper (unused here)
        :raises ConfigurationException: if the variable is not recordable
        """
        # BUGFIX: these branches were independent "if" statements, so the
        # trailing "else" raised ConfigurationException for every variable
        # except SPIKE_PROB — even after successfully clearing MOC or SPIKES
        # data. They must form a single if/elif/else chain.
        if variable == DRNLMachineVertex.MOC:
            for drnl_vertex in self._drnl_vertices:
                placement = placements.get_placement_of_vertex(drnl_vertex)
                buffer_manager.clear_recorded_data(
                    placement.x, placement.y, placement.p,
                    DRNLMachineVertex.MOC_RECORDING_REGION_ID.value)
        elif variable == self.SPIKES:
            for ihcan_vertex in self._ihcan_vertices:
                placement = placements.get_placement_of_vertex(ihcan_vertex)
                buffer_manager.clear_recorded_data(
                    placement.x, placement.y, placement.p,
                    (IHCANMachineVertex.RECORDING_REGIONS.
                     SPIKE_RECORDING_REGION_ID.value))
        elif variable == self.SPIKE_PROB:
            for ihcan_vertex in self._ihcan_vertices:
                placement = placements.get_placement_of_vertex(ihcan_vertex)
                buffer_manager.clear_recorded_data(
                    placement.x, placement.y, placement.p,
                    (IHCANMachineVertex.RECORDING_REGIONS.
                     SPIKE_PROBABILITY_REGION_ID.value))
        else:
            raise ConfigurationException(
                self.CLEAR_RECORDING_ERROR.format(variable))

    @overrides(AbstractNeuronRecordable.get_neuron_sampling_interval)
    def get_neuron_sampling_interval(self, variable, graph_mapper,
                                     local_time_period_map):
        """ Get the sampling interval used for a recorded variable.

        Falls back to the simulator's default machine time step when no
        mapping data is available yet.
        """
        if graph_mapper is None or local_time_period_map is None:
            return self.my_variable_local_time_period(
                get_simulator().default_machine_time_step, variable)

        # pick the recorder and an exemplar vertex for the variable
        if variable == DRNLMachineVertex.MOC:
            recorder = self._drnl_neuron_recorder
            exemplar = self._drnl_vertices[0]
        elif variable in IHCANMachineVertex.RECORDABLES:
            recorder = self._ihcan_neuron_recorder
            exemplar = self._ihcan_vertices[0]
        else:
            raise Exception(self.SAMPLING_INTERVAL_ERROR.format(variable))
        return recorder.get_neuron_sampling_interval(
            variable, exemplar, local_time_period_map)

    @overrides(AbstractNeuronRecordable.set_recording)
    def set_recording(self,
                      variable,
                      default_machine_time_step,
                      new_state=True,
                      sampling_interval=None,
                      indexes=None):
        """ Turn recording of a variable on or off. """
        self._change_requires_mapping = not self.is_recording(variable)
        # pick the recorder that owns this variable
        if variable == DRNLMachineVertex.MOC:
            recorder = self._drnl_neuron_recorder
        elif variable in IHCANMachineVertex.RECORDABLES:
            recorder = self._ihcan_neuron_recorder
        else:
            raise ConfigurationException(self.RECORDING_ERROR.format(variable))
        recorder.set_recording(
            variable, sampling_interval, indexes, self,
            default_machine_time_step, new_state)

    @overrides(AbstractNeuronRecordable.is_recording)
    def is_recording(self, variable):
        """ Whether the given variable is currently being recorded. """
        if variable == DRNLMachineVertex.MOC:
            return self._drnl_neuron_recorder.is_recording(variable)
        if variable in IHCANMachineVertex.RECORDABLES:
            return self._ihcan_neuron_recorder.is_recording(variable)
        raise ConfigurationException(self.RECORDING_ERROR.format(variable))

    @overrides(AbstractNeuronRecordable.get_recording_slice)
    def get_recording_slice(self, graph_mapper, vertex):
        """ The slice of atoms a machine vertex records for. """
        return vertex.recorded_slice()

    @overrides(AbstractNeuronRecordable.get_data)
    def get_data(self, variable, run_time, placements, graph_mapper,
                 buffer_manager, local_time_period_map):
        """ Read recorded data for a variable back from the machine.

        :param variable: which recorded variable to read
        :param run_time: how long the simulation ran for
        :param placements: placements of the machine vertices
        :param graph_mapper: the graph mapper
        :param buffer_manager: buffer manager holding the recorded data
        :param local_time_period_map: map of vertex to local time period
        :return: the recorded data (shape depends on the variable)
        :raises ConfigurationException: if the variable is not recordable
        """
        if variable == DRNLMachineVertex.MOC:
            # NOTE(review): this uses MOC_RECORDABLE_REGION_ID while
            # clear_recording uses MOC_RECORDING_REGION_ID.value — confirm
            # both names refer to the same region
            return self._drnl_neuron_recorder.get_matrix_data(
                self._label, buffer_manager,
                DRNLMachineVertex.MOC_RECORDABLE_REGION_ID, placements,
                graph_mapper, self, variable, run_time, local_time_period_map)
        elif variable == IHCANMachineVertex.SPIKE_PROB:
            matrix_data = self._ihcan_neuron_recorder.get_matrix_data(
                self._label, buffer_manager, IHCANMachineVertex.
                RECORDING_REGIONS.SPIKE_PROBABILITY_REGION_ID.value,
                placements, graph_mapper, self, variable, run_time,
                local_time_period_map)

            # convert to n fibers per time step: each recorded row
            # interleaves seq_size samples per fibre, so de-interleave it
            # into one entry per time step
            new_matrix_data = list()
            for element in matrix_data[0]:
                seq_elements = list()
                for seq_index in range(0, self._model.seq_size):
                    seq_elements.append(
                        element[0 + seq_index::self._model.seq_size])
                for time_step in seq_elements:
                    new_matrix_data.append(time_step)
            # NOTE(review): the hard-coded [0:10] on the second tuple member
            # keeps only the first 10 entries — confirm why this limit exists
            return new_matrix_data, matrix_data[1][0:10], matrix_data[2]
        elif variable == IHCANMachineVertex.SPIKES:
            return self._ihcan_neuron_recorder.get_spikes(
                self._label, buffer_manager, IHCANMachineVertex.
                RECORDING_REGIONS.SPIKE_RECORDING_REGION_ID.value, placements,
                graph_mapper, self, run_time)
        else:
            raise ConfigurationException(self.RECORDING_ERROR.format(variable))

    def get_sampling_interval(self, sample_size_window):
        """ Convert a sample window size into a sampling interval in\
            microseconds, based on this vertex's timer period.
        """
        interval = self._timer_period * sample_size_window
        return interval * MICRO_TO_SECOND_CONVERSION
class AbstractPopulationVertex(
        ApplicationVertex, AbstractGeneratesDataSpecification,
        AbstractHasAssociatedBinary, AbstractContainsUnits,
        AbstractSpikeRecordable, AbstractNeuronRecordable,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractProvidesIncomingPartitionConstraints,
        AbstractPopulationInitializable, AbstractPopulationSettable,
        AbstractChangableAfterRun, AbstractRewritesDataSpecification,
        AbstractReadParametersBeforeSet, AbstractAcceptsIncomingSynapses,
        ProvidesKeyToAtomMappingImpl):
    """ Underlying vertex model for Neural Populations.
    """
    __slots__ = [
        "_buffer_size_before_receive", "_change_requires_mapping",
        "_change_requires_neuron_parameters_reload",
        "_incoming_spike_buffer_size", "_maximum_sdram_for_buffering",
        "_minimum_buffer_sdram", "_n_atoms", "_n_profile_samples",
        "_neuron_impl", "_neuron_recorder", "_parameters", "_pynn_model",
        "_receive_buffer_host", "_receive_buffer_port", "_state_variables",
        "_synapse_manager", "_time_between_requests", "_units",
        "_using_auto_pause_and_resume"
    ]

    # number of SARK mallocs assumed for basic data spec generation
    BASIC_MALLOC_USAGE = 2

    # recording region IDs
    SPIKE_RECORDING_REGION = 0

    # the size of the runtime SDP port data region
    RUNTIME_SDP_PORT_SIZE = 4

    # 8 elements before the start of global parameters
    BYTES_TILL_START_OF_GLOBAL_PARAMETERS = 32

    # class-level counter, incremented each time a machine vertex is created
    _n_vertices = 0

    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """ Create the population vertex.

        :param n_neurons: number of neurons (atoms) in the population
        :param label: vertex label
        :param constraints: placement constraints
        :param max_atoms_per_core: cap on atoms per machine vertex
        :param spikes_per_second: expected spike rate, passed to the\
            synaptic manager for ring buffer scaling
        :param ring_buffer_sigma: sigma value passed to the synaptic manager
        :param incoming_spike_buffer_size: size of the incoming spike\
            buffer, or None to read it from the simulator config
        :param neuron_impl: the neuron model implementation
        :param pynn_model: the PyNN-facing model object
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        self._n_atoms = n_neurons

        # buffer data
        self._incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # config fallback when no explicit buffer size was given
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self._neuron_impl = neuron_impl
        self._pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        self._neuron_impl.add_parameters(self._parameters)
        self._neuron_impl.add_state_variables(self._state_variables)

        # Set up for recording: spikes plus the model's own variables
        recordables = ["spikes"]
        recordables.extend(self._neuron_impl.get_recordable_variables())
        self._neuron_recorder = NeuronRecorder(recordables, n_neurons)

        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")

        # If live buffering is enabled, set a maximum on the buffer sizes
        spike_buffer_max_size = 0
        variable_buffer_max_size = 0
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            spike_buffer_max_size = config.getint("Buffers",
                                                  "spike_buffer_size")
            variable_buffer_max_size = config.getint("Buffers",
                                                     "variable_buffer_size")

        # one maximum per recording region: spikes first, then variables
        self._maximum_sdram_for_buffering = [spike_buffer_max_size]
        for _ in self._neuron_impl.get_recordable_variables():
            self._maximum_sdram_for_buffering.append(variable_buffer_max_size)

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            self._neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Set up for profiling
        self._n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")

    @property
    @overrides(ApplicationVertex.n_atoms)
    def n_atoms(self):
        """ The number of neurons in the population. """
        return self._n_atoms

    @inject_items({
        "graph": "MemoryApplicationGraph",
        "n_machine_time_steps": "TotalMachineTimeSteps",
        "machine_time_step": "MachineTimeStep"
    })
    @overrides(ApplicationVertex.get_resources_used_by_atoms,
               additional_arguments={
                   "graph", "n_machine_time_steps", "machine_time_step"
               })
    def get_resources_used_by_atoms(self, vertex_slice, graph,
                                    n_machine_time_steps, machine_time_step):
        """ Estimate the resources a machine vertex covering\
            ``vertex_slice`` would need: SDRAM, DTCM and CPU cycles, plus\
            the recording resources for that slice.

        :param vertex_slice: the slice of atoms to size for
        :param graph: the application graph (injected)
        :param n_machine_time_steps: total run length in steps (injected)
        :param machine_time_step: the machine time step (injected)
        :return: a ResourceContainer with the combined requirements
        """
        # pylint: disable=arguments-differ

        # set resources required from this object
        container = ResourceContainer(
            sdram=SDRAMResource(
                self.get_sdram_usage_for_atoms(vertex_slice, graph,
                                               machine_time_step)),
            dtcm=DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice)),
            cpu_cycles=CPUCyclesPerTickResource(
                self.get_cpu_usage_for_atoms(vertex_slice)))

        # add the recording-buffer resources on top
        recording_sizes = recording_utilities.get_recording_region_sizes(
            self._get_buffered_sdram(vertex_slice, n_machine_time_steps),
            self._minimum_buffer_sdram, self._maximum_sdram_for_buffering,
            self._using_auto_pause_and_resume)
        container.extend(
            recording_utilities.get_recording_resources(
                recording_sizes, self._receive_buffer_host,
                self._receive_buffer_port))

        # return the total resources.
        return container

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        """ Whether this vertex has changed since the last mapping run. """
        return self._change_requires_mapping

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        """ Record that the vertex no longer needs remapping. """
        self._change_requires_mapping = False

    def _get_buffered_sdram_per_timestep(self, vertex_slice):
        """ Recording SDRAM needed per time step, one entry per region\
            (spikes first, then each recordable variable).
        """
        recorder = self._neuron_recorder
        sizes = [recorder.get_buffered_sdram_per_timestep(
            "spikes", vertex_slice)]
        sizes.extend(
            recorder.get_buffered_sdram_per_timestep(variable, vertex_slice)
            for variable in self._neuron_impl.get_recordable_variables())
        return sizes

    def _get_buffered_sdram(self, vertex_slice, n_machine_time_steps):
        """ Total recording SDRAM over the whole run, one entry per region\
            (spikes first, then each recordable variable).
        """
        recorder = self._neuron_recorder
        sizes = [recorder.get_buffered_sdram(
            "spikes", vertex_slice, n_machine_time_steps)]
        sizes.extend(
            recorder.get_buffered_sdram(
                variable, vertex_slice, n_machine_time_steps)
            for variable in self._neuron_impl.get_recordable_variables())
        return sizes

    @inject_items({"n_machine_time_steps": "TotalMachineTimeSteps"})
    @overrides(ApplicationVertex.create_machine_vertex,
               additional_arguments={"n_machine_time_steps"})
    def create_machine_vertex(self,
                              vertex_slice,
                              resources_required,
                              n_machine_time_steps,
                              label=None,
                              constraints=None):
        """ Build the machine vertex for one slice of the population.

        :param vertex_slice: the slice of atoms this machine vertex covers
        :param resources_required: resources allocated to the vertex
        :param n_machine_time_steps: total run length in steps (injected)
        :param label: optional label for the machine vertex
        :param constraints: optional placement constraints
        :return: the new PopulationMachineVertex
        """
        # pylint: disable=too-many-arguments, arguments-differ
        is_recording = len(self._neuron_recorder.recording_variables) > 0
        buffered_sdram_per_timestep = self._get_buffered_sdram_per_timestep(
            vertex_slice)
        buffered_sdram = self._get_buffered_sdram(vertex_slice,
                                                  n_machine_time_steps)
        minimum_buffer_sdram = recording_utilities.get_minimum_buffer_sdram(
            buffered_sdram, self._minimum_buffer_sdram)
        overflow_sdram = self._neuron_recorder.get_sampling_overflow_sdram(
            vertex_slice)
        vertex = PopulationMachineVertex(resources_required, is_recording,
                                         minimum_buffer_sdram,
                                         buffered_sdram_per_timestep, label,
                                         constraints, overflow_sdram)

        # class-level tally of machine vertices created so far
        AbstractPopulationVertex._n_vertices += 1

        # return machine vertex
        return vertex

    def get_cpu_usage_for_atoms(self, vertex_slice):
        """ CPU cycles per tick needed by this slice of neurons. """
        n_atoms = vertex_slice.n_atoms
        cycles = _NEURON_BASE_N_CPU_CYCLES + _C_MAIN_BASE_N_CPU_CYCLES
        cycles += _NEURON_BASE_N_CPU_CYCLES_PER_NEURON * n_atoms
        cycles += self._neuron_recorder.get_n_cpu_cycles(n_atoms)
        cycles += self._neuron_impl.get_n_cpu_cycles(n_atoms)
        cycles += self._synapse_manager.get_n_cpu_cycles()
        return cycles

    def get_dtcm_usage_for_atoms(self, vertex_slice):
        """ Estimate the DTCM used, in bytes, by the given slice of atoms. """
        usage = _NEURON_BASE_DTCM_USAGE_IN_BYTES
        usage += self._neuron_impl.get_dtcm_usage_in_bytes(
            vertex_slice.n_atoms)
        usage += self._neuron_recorder.get_dtcm_usage_in_bytes(vertex_slice)
        usage += self._synapse_manager.get_dtcm_usage_in_bytes()
        return usage

    def _get_sdram_usage_for_neuron_params(self, vertex_slice):
        """ Calculate the SDRAM usage for just the neuron parameters region.

        :param vertex_slice: the slice of atoms.
        :return: The SDRAM required for the neuron region
        """
        header_bytes = self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS
        recorder_bytes = self._neuron_recorder.get_sdram_usage_in_bytes(
            vertex_slice)
        neuron_bytes = self._neuron_impl.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms)
        return header_bytes + recorder_bytes + neuron_bytes

    def get_sdram_usage_for_atoms(self, vertex_slice, graph,
                                  machine_time_step):
        """ Estimate the total SDRAM, in bytes, used by the given slice.

        :param vertex_slice: the slice of atoms
        :param graph: the application graph, for edges ending at this vertex
        :param machine_time_step: the machine time step
        :return: total SDRAM usage in bytes
        """
        region_sizes = [
            common_constants.SYSTEM_BYTES_REQUIREMENT,
            self._get_sdram_usage_for_neuron_params(vertex_slice),
            recording_utilities.get_recording_header_size(
                len(self._neuron_impl.get_recordable_variables()) + 1),
            PopulationMachineVertex.get_provenance_data_size(
                PopulationMachineVertex.N_ADDITIONAL_PROVENANCE_DATA_ITEMS),
            self._synapse_manager.get_sdram_usage_in_bytes(
                vertex_slice, graph.get_edges_ending_at_vertex(self),
                machine_time_step),
            # Overhead per malloc performed by data specification execution
            self._get_number_of_mallocs_used_by_dsg() *
            common_constants.SARK_PER_MALLOC_SDRAM_USAGE,
            profile_utils.get_profile_region_size(self._n_profile_samples),
        ]
        return sum(region_sizes)

    def _get_number_of_mallocs_used_by_dsg(self):
        """ Count the memory blocks allocated during DSG execution;\
            one extra malloc per recorded variable.
        """
        return (
            self.BASIC_MALLOC_USAGE +
            self._synapse_manager.get_number_of_mallocs_used_by_dsg() +
            len(self._neuron_recorder.recording_variables))

    def _reserve_memory_regions(self, spec, vertex_slice, vertex):
        """ Reserve the DSG memory regions used by this vertex: system,\
            neuron parameters, recording, profiling and provenance.

        :param spec: the data specification to reserve regions in
        :param vertex_slice: the slice of atoms covered by the machine vertex
        :param vertex: the machine vertex, which reserves its own\
            provenance region
        :rtype: None
        """

        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.SYSTEM.value,
            size=common_constants.SYSTEM_BYTES_REQUIREMENT,
            label='System')

        # Neuron parameters region; size depends on the slice
        self._reserve_neuron_params_data_region(spec, vertex_slice)

        # Recording region: one header entry per recordable variable plus
        # one more (spikes are handled as region index 0 elsewhere)
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.RECORDING.value,
            size=recording_utilities.get_recording_header_size(
                len(self._neuron_impl.get_recordable_variables()) + 1))

        # Profiling region; sized by the requested number of samples
        profile_utils.reserve_profile_region(
            spec, constants.POPULATION_BASED_REGIONS.PROFILING.value,
            self._n_profile_samples)

        vertex.reserve_provenance_data_region(spec)

    def _reserve_neuron_params_data_region(self, spec, vertex_slice):
        """ Reserve the neuron parameter data region.

        :param spec: the spec to write the DSG region to
        :param vertex_slice: the slice of atoms from the application vertex
        :return: None
        """
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            size=self._get_sdram_usage_for_neuron_params(vertex_slice),
            label='NeuronParams')

    def _write_neuron_parameters(self, spec, key, vertex_slice,
                                 machine_time_step, time_scale_factor):
        """ Write the neuron parameters region for one machine vertex.

        The write order here defines the binary layout of the region and
        must match what the C code reads.

        :param spec: the data specification to write to
        :param key: the base routing key for spikes, or None if no key
        :param vertex_slice: the slice of atoms covered by the vertex
        :param machine_time_step: the machine time step
        :param time_scale_factor: the time scale factor
        :rtype: None
        """
        # pylint: disable=too-many-arguments
        n_atoms = vertex_slice.n_atoms
        spec.comment(
            "\nWriting Neuron Parameters for {} Neurons:\n".format(n_atoms))

        # Set the focus to the memory region 2 (neuron parameters):
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)

        # Write the random back off value; drawn per-vertex from the count
        # of vertices created so far, presumably to spread spike sending
        # over time across cores
        spec.write_value(
            random.randint(0, AbstractPopulationVertex._n_vertices))

        # Write the number of microseconds between sending spikes
        time_between_spikes = ((machine_time_step * time_scale_factor) /
                               (n_atoms * 2.0))
        spec.write_value(data=int(time_between_spikes))

        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)

        # Write the number of synapse types
        spec.write_value(data=self._neuron_impl.get_n_synapse_types())

        # Write the size of the incoming spike buffer
        spec.write_value(data=self._incoming_spike_buffer_size)

        # Write the number of variables that can be recorded
        spec.write_value(
            data=len(self._neuron_impl.get_recordable_variables()))

        # Write the recording data
        recording_data = self._neuron_recorder.get_data(vertex_slice)
        spec.write_array(recording_data)

        # Write the neuron parameters
        neuron_data = self._neuron_impl.get_data(self._parameters,
                                                 self._state_variables,
                                                 vertex_slice)
        spec.write_array(neuron_data)

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "graph_mapper": "MemoryGraphMapper",
        "routing_info": "MemoryRoutingInfos"
    })
    @overrides(AbstractRewritesDataSpecification.regenerate_data_specification,
               additional_arguments={
                   "machine_time_step", "time_scale_factor", "graph_mapper",
                   "routing_info"
               })
    def regenerate_data_specification(self, spec, placement, machine_time_step,
                                      time_scale_factor, graph_mapper,
                                      routing_info):
        """ Rewrite just the neuron parameters region for a placed vertex,\
            used when parameters change between runs.

        :param spec: the data specification to write to
        :param placement: the placement of the machine vertex
        :param machine_time_step: the machine time step (injected)
        :param time_scale_factor: the time scale factor (injected)
        :param graph_mapper: maps machine vertices to slices (injected)
        :param routing_info: routing information for keys (injected)
        :rtype: None
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex_slice = graph_mapper.get_slice(placement.vertex)

        # reserve the neuron parameters data region; reuse the slice already
        # looked up above rather than querying the graph mapper a second time
        self._reserve_neuron_params_data_region(spec, vertex_slice)

        # write the neuron params into the new DSG region
        self._write_neuron_parameters(
            key=routing_info.get_first_key_from_pre_vertex(
                placement.vertex, constants.SPIKE_PARTITION_ID),
            machine_time_step=machine_time_step,
            spec=spec,
            time_scale_factor=time_scale_factor,
            vertex_slice=vertex_slice)

        # close spec
        spec.end_specification()

    @overrides(AbstractRewritesDataSpecification.
               requires_memory_regions_to_be_reloaded)
    def requires_memory_regions_to_be_reloaded(self):
        """ True when neuron parameters have changed since the last load. """
        return self._change_requires_neuron_parameters_reload

    @overrides(AbstractRewritesDataSpecification.mark_regions_reloaded)
    def mark_regions_reloaded(self):
        """ Record that the changed neuron parameters have been reloaded. """
        self._change_requires_neuron_parameters_reload = False

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "graph_mapper": "MemoryGraphMapper",
        "application_graph": "MemoryApplicationGraph",
        "machine_graph": "MemoryMachineGraph",
        "routing_info": "MemoryRoutingInfos",
        "tags": "MemoryTags",
        "n_machine_time_steps": "TotalMachineTimeSteps",
        "placements": "MemoryPlacements",
    })
    @overrides(AbstractGeneratesDataSpecification.generate_data_specification,
               additional_arguments={
                   "machine_time_step",
                   "time_scale_factor",
                   "graph_mapper",
                   "application_graph",
                   "machine_graph",
                   "routing_info",
                   "tags",
                   "n_machine_time_steps",
                   "placements",
               })
    def generate_data_specification(self, spec, placement, machine_time_step,
                                    time_scale_factor, graph_mapper,
                                    application_graph, machine_graph,
                                    routing_info, tags, n_machine_time_steps,
                                    placements):
        """ Generate the full data specification for one placed machine\
            vertex: reserve all regions then write system, recording, neuron\
            parameter, profile and synaptic data.

        :param spec: the data specification to write to
        :param placement: the placement of the machine vertex
        :param machine_time_step: the machine time step (injected)
        :param time_scale_factor: the time scale factor (injected)
        :param graph_mapper: maps machine vertices to slices (injected)
        :param application_graph: the application graph (injected)
        :param machine_graph: the machine graph (injected)
        :param routing_info: routing information for keys (injected)
        :param tags: the tags assigned to vertices (injected)
        :param n_machine_time_steps: total machine time steps (injected)
        :param placements: all placements (injected)
        :rtype: None
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex = placement.vertex

        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self._neuron_impl.model_name))
        vertex_slice = graph_mapper.get_slice(vertex)

        # Reserve memory regions
        self._reserve_memory_regions(spec, vertex_slice, vertex)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key
        key = routing_info.get_first_key_from_pre_vertex(
            vertex, constants.SPIKE_PARTITION_ID)

        # Write the setup region
        spec.switch_write_focus(
            constants.POPULATION_BASED_REGIONS.SYSTEM.value)
        spec.write_array(
            simulation_utilities.get_simulation_header_array(
                self.get_binary_file_name(), machine_time_step,
                time_scale_factor))

        # Write the recording region
        spec.switch_write_focus(
            constants.POPULATION_BASED_REGIONS.RECORDING.value)
        ip_tags = tags.get_ip_tags_for_vertex(vertex)
        recorded_region_sizes = recording_utilities.get_recorded_region_sizes(
            self._get_buffered_sdram(vertex_slice, n_machine_time_steps),
            self._maximum_sdram_for_buffering)
        spec.write_array(
            recording_utilities.get_recording_header_array(
                recorded_region_sizes, self._time_between_requests,
                self._buffer_size_before_receive, ip_tags))

        # Write the neuron parameters
        self._write_neuron_parameters(spec, key, vertex_slice,
                                      machine_time_step, time_scale_factor)

        # write profile data
        profile_utils.write_profile_region_data(
            spec, constants.POPULATION_BASED_REGIONS.PROFILING.value,
            self._n_profile_samples)

        # Get the weight_scale value from the appropriate location
        weight_scale = self._neuron_impl.get_global_weight_scale()

        # allow the synaptic matrix to write its data spec-able data
        self._synapse_manager.write_data_spec(spec, self, vertex_slice, vertex,
                                              placement, machine_graph,
                                              application_graph, routing_info,
                                              graph_mapper, weight_scale,
                                              machine_time_step, placements)

        # End the writing of this specification:
        spec.end_specification()

    @overrides(AbstractHasAssociatedBinary.get_binary_file_name)
    def get_binary_file_name(self):
        """ Build the binary file name, inserting the synapse manager's\
            executable suffix between the base name and the extension.
        """
        base_name, extension = os.path.splitext(
            self._neuron_impl.binary_name)
        suffix = self._synapse_manager.vertex_executable_suffix
        return "".join((base_name, suffix, extension))

    @overrides(AbstractHasAssociatedBinary.get_binary_start_type)
    def get_binary_start_type(self):
        """ The binary runs under the simulation interface. """
        return ExecutableType.USES_SIMULATION_INTERFACE

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        """ Whether spikes are currently being recorded. """
        return self._neuron_recorder.is_recording("spikes")

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(self,
                             new_state=True,
                             sampling_interval=None,
                             indexes=None):
        """ Turn spike recording on or off; delegates to set_recording. """
        self.set_recording("spikes", new_state, sampling_interval, indexes)

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(self, placements, graph_mapper, buffer_manager,
                   machine_time_step):
        """ Read back the spikes recorded on the machine. """
        return self._neuron_recorder.get_spikes(
            self.label, buffer_manager, self.SPIKE_RECORDING_REGION,
            placements, graph_mapper, self, machine_time_step)

    @overrides(AbstractNeuronRecordable.get_recordable_variables)
    def get_recordable_variables(self):
        """ The variables that the neuron recorder can record. """
        return self._neuron_recorder.get_recordable_variables()

    @overrides(AbstractNeuronRecordable.is_recording)
    def is_recording(self, variable):
        """ Whether the given variable is currently being recorded. """
        return self._neuron_recorder.is_recording(variable)

    @overrides(AbstractNeuronRecordable.set_recording)
    def set_recording(self,
                      variable,
                      new_state=True,
                      sampling_interval=None,
                      indexes=None):
        """ Turn recording of a variable on or off.

        Turning on a variable that was not previously recorded forces
        remapping.
        """
        was_recording = self.is_recording(variable)
        self._change_requires_mapping = not was_recording
        self._neuron_recorder.set_recording(variable, new_state,
                                            sampling_interval, indexes)

    @overrides(AbstractNeuronRecordable.get_data)
    def get_data(self, variable, n_machine_time_steps, placements,
                 graph_mapper, buffer_manager, machine_time_step):
        """ Read back the recorded matrix data for a variable.

        Spikes use recording region 0; other variables are offset by one
        from their index in the neuron implementation.
        """
        # pylint: disable=too-many-arguments
        if variable == "spikes":
            region_index = 0
        else:
            region_index = 1 + self._neuron_impl.get_recordable_variable_index(
                variable)
        return self._neuron_recorder.get_matrix_data(
            self.label, buffer_manager, region_index, placements,
            graph_mapper, self, variable, n_machine_time_steps)

    @overrides(AbstractNeuronRecordable.get_neuron_sampling_interval)
    def get_neuron_sampling_interval(self, variable):
        """ The sampling interval used when recording the given variable. """
        return self._neuron_recorder.get_neuron_sampling_interval(variable)

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        """ The sampling interval used when recording spikes. """
        return self._neuron_recorder.get_neuron_sampling_interval("spikes")

    @overrides(AbstractPopulationInitializable.initialize)
    def initialize(self, variable, value):
        """ Set the initial value of a state variable; flags the neuron\
            parameters for reload.

        :raise KeyError: if the variable is not a known state variable
        """
        if variable not in self._state_variables:
            raise KeyError("Vertex does not support initialisation of"
                           " parameter {}".format(variable))
        self._state_variables.set_value(variable, value)
        self._change_requires_neuron_parameters_reload = True

    @property
    def initialize_parameters(self):
        """ The names of the state variables that can be initialised. """
        return self._pynn_model.default_initial_values.keys()

    def _get_parameter(self, variable):
        if variable.endswith("_init"):
            # method called with "V_init"
            key = variable[:-5]
            if variable in self._state_variables:
                # variable is v and parameter is v_init
                return variable
            elif key in self._state_variables:
                # Oops neuron defines v and not v_init
                return key
        else:
            # method called with "v"
            if variable + "_init" in self._state_variables:
                # variable is v and parameter is v_init
                return variable + "_init"
            if variable in self._state_variables:
                # Oops neuron defines v and not v_init
                return variable

        # parameter not found for this variable
        raise KeyError("No variable {} found in {}".format(
            variable, self._neuron_impl.model_name))

    @overrides(AbstractPopulationInitializable.get_initial_value)
    def get_initial_value(self, variable, selector=None):
        parameter = self._get_parameter(variable)

        ranged_list = self._state_variables[parameter]
        if selector is None:
            return ranged_list
        return ranged_list.get_values(selector)

    @overrides(AbstractPopulationInitializable.set_initial_value)
    def set_initial_value(self, variable, value, selector=None):
        """ Set the initial value(s) of a state variable.

        :param variable: the variable name, with or without ``_init``
        :param value: the value to set
        :param selector: optional selector for a subset of the atoms
        """
        target = self._get_parameter(variable)
        self._state_variables[target].set_value_by_selector(selector, value)

    @property
    def conductance_based(self):
        """ Whether the underlying neuron model is conductance based. """
        return self._neuron_impl.is_conductance_based

    @overrides(AbstractPopulationSettable.get_value)
    def get_value(self, key):
        """ Get a property of the overall model.
        """
        if key not in self._parameters:
            raise InvalidParameterType(
                "Population {} does not have parameter {}".format(
                    self._neuron_impl.model_name, key))
        return self._parameters[key]

    @overrides(AbstractPopulationSettable.set_value)
    def set_value(self, key, value):
        """ Set a property of the overall model; flags the neuron\
            parameters for reload.

        :raise InvalidParameterType: if the parameter is not known
        """
        if key in self._parameters:
            self._parameters.set_value(key, value)
            self._change_requires_neuron_parameters_reload = True
            return
        raise InvalidParameterType(
            "Population {} does not have parameter {}".format(
                self._neuron_impl.model_name, key))

    @overrides(AbstractReadParametersBeforeSet.read_parameters_from_machine)
    def read_parameters_from_machine(self, transceiver, placement,
                                     vertex_slice):
        """ Read the neuron parameters back from SDRAM and update the\
            Python-side parameter and state-variable holders.

        :param transceiver: the transceiver to read with
        :param placement: the placement of the machine vertex
        :param vertex_slice: the slice of atoms on that vertex
        :rtype: None
        """
        # Find where the neuron parameters region is on this core
        region_base = \
            helpful_functions.locate_memory_region_for_placement(
                placement,
                constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
                transceiver)

        # Skip the global data at the front of the region, which we do not
        # need to read back
        parameters_address = (
            region_base +
            self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS)

        # Size left to read once the global header is skipped
        read_size = self._get_sdram_usage_for_neuron_params(vertex_slice)
        read_size -= self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS

        # Pull the raw bytes from the machine
        raw_data = transceiver.read_memory(placement.x, placement.y,
                                           parameters_address,
                                           read_size)

        # The recorder globals are not changed on the machine; they are only
        # written out in case data is changed and written back, so skip them
        recorder_offset = self._neuron_recorder.get_sdram_usage_in_bytes(
            vertex_slice)

        # Update the python neuron parameters with the data
        self._neuron_impl.read_data(raw_data, recorder_offset, vertex_slice,
                                    self._parameters, self._state_variables)

    @property
    def weight_scale(self):
        """ The global weight scale of the neuron implementation. """
        return self._neuron_impl.get_global_weight_scale()

    @property
    def ring_buffer_sigma(self):
        """ The ring buffer sigma, held by the synapse manager. """
        return self._synapse_manager.ring_buffer_sigma

    @ring_buffer_sigma.setter
    def ring_buffer_sigma(self, ring_buffer_sigma):
        # Forward straight to the synapse manager
        self._synapse_manager.ring_buffer_sigma = ring_buffer_sigma

    @property
    def spikes_per_second(self):
        """ The expected spikes per second, held by the synapse manager. """
        return self._synapse_manager.spikes_per_second

    @spikes_per_second.setter
    def spikes_per_second(self, spikes_per_second):
        # Forward straight to the synapse manager
        self._synapse_manager.spikes_per_second = spikes_per_second

    @property
    def synapse_dynamics(self):
        """ The synapse dynamics, held by the synapse manager. """
        return self._synapse_manager.synapse_dynamics

    def set_synapse_dynamics(self, synapse_dynamics):
        """ Set the synapse dynamics on the synapse manager. """
        self._synapse_manager.synapse_dynamics = synapse_dynamics

    def add_pre_run_connection_holder(self, connection_holder, edge,
                                      synapse_info):
        """ Register a connection holder to be filled before the run starts;\
            delegates to the synapse manager.
        """
        # pylint: disable=arguments-differ
        self._synapse_manager.add_pre_run_connection_holder(
            connection_holder, edge, synapse_info)

    @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine)
    def get_connections_from_machine(
            self,
            transceiver,
            placement,
            edge,
            graph_mapper,
            routing_infos,
            synapse_information,
            machine_time_step,
            using_extra_monitor_cores,
            placements=None,
            data_receiver=None,
            sender_extra_monitor_core_placement=None,
            extra_monitor_cores_for_router_timeout=None,
            handle_time_out_configuration=True,
            fixed_routes=None):
        """ Read synaptic connections back from the machine; pure\
            pass-through to the synapse manager.
        """
        # pylint: disable=too-many-arguments
        return self._synapse_manager.get_connections_from_machine(
            transceiver, placement, edge, graph_mapper, routing_infos,
            synapse_information, machine_time_step,
            using_extra_monitor_cores, placements, data_receiver,
            sender_extra_monitor_core_placement,
            extra_monitor_cores_for_router_timeout,
            handle_time_out_configuration, fixed_routes)

    def clear_connection_cache(self):
        """ Drop any cached synaptic connection data. """
        self._synapse_manager.clear_connection_cache()

    def get_maximum_delay_supported_in_ms(self, machine_time_step):
        """ The maximum synaptic delay, in milliseconds, the synapse\
            manager supports at the given time step.
        """
        return self._synapse_manager.get_maximum_delay_supported_in_ms(
            machine_time_step)

    @overrides(AbstractProvidesIncomingPartitionConstraints.
               get_incoming_partition_constraints)
    def get_incoming_partition_constraints(self, partition):
        """ Gets the constraints for partitions going into this vertex.

        The partition itself is not inspected; the synapse manager supplies
        the constraints.

        :param partition: partition that goes into this vertex
        :return: list of constraints
        """
        return self._synapse_manager.get_incoming_partition_constraints()

    @overrides(AbstractProvidesOutgoingPartitionConstraints.
               get_outgoing_partition_constraints)
    def get_outgoing_partition_constraints(self, partition):
        """ Gets the constraints for partitions going out of this vertex.

        Every outgoing partition is constrained to a contiguous key range.

        :param partition: the partition that leaves this vertex
        :return: list of constraints
        """
        return [ContiguousKeyRangeContraint()]

    @overrides(AbstractNeuronRecordable.clear_recording)
    def clear_recording(self, variable, buffer_manager, placements,
                        graph_mapper):
        """ Clear recorded data for one variable from the buffer manager.

        Spikes use recording region 0; other variables are offset by one
        from their index in the neuron implementation.
        """
        if variable == "spikes":
            region_index = 0
        else:
            region_index = 1 + self._neuron_impl.get_recordable_variable_index(
                variable)
        self._clear_recording_region(buffer_manager, placements, graph_mapper,
                                     region_index)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        """ Clear recorded spike data from the buffer manager. """
        self._clear_recording_region(
            buffer_manager, placements, graph_mapper,
            AbstractPopulationVertex.SPIKE_RECORDING_REGION)

    def _clear_recording_region(self, buffer_manager, placements, graph_mapper,
                                recording_region_id):
        """ Clear a recorded data region from the buffer manager.

        :param buffer_manager: the buffer manager object
        :param placements: the placements object
        :param graph_mapper: the graph mapper object
        :param recording_region_id: the recorded region ID for clearing
        :rtype: None
        """
        for m_vertex in graph_mapper.get_machine_vertices(self):
            m_placement = placements.get_placement_of_vertex(m_vertex)
            buffer_manager.clear_recorded_data(
                m_placement.x, m_placement.y, m_placement.p,
                recording_region_id)

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        """ Get the units of a recordable variable or a parameter.

        :param variable: the variable or parameter name
        :raise Exception: if the name is neither recordable nor a parameter
        """
        if self._neuron_impl.is_recordable(variable):
            return self._neuron_impl.get_recordable_units(variable)
        if variable in self._parameters:
            return self._neuron_impl.get_units(variable)
        raise Exception("Population {} does not have parameter {}".format(
            self._neuron_impl.model_name, variable))

    def describe(self):
        """ Get a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template context\
        will be returned.
        """
        parameters = dict()
        for parameter_name in self._pynn_model.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": self._neuron_impl.model_name,
            "default_parameters": self._pynn_model.default_parameters,
            "default_initial_values": self._pynn_model.default_parameters,
            "parameters": parameters,
        }
        return context

    def get_synapse_id_by_target(self, target):
        """ Look up the synapse type ID for a named target. """
        return self._neuron_impl.get_synapse_id_by_target(target)

    def __str__(self):
        """ Short description: label and atom count. """
        return "{label} with {n} atoms".format(label=self.label,
                                               n=self.n_atoms)

    def __repr__(self):
        """ Same as the informal string representation. """
        return str(self)

    def gen_on_machine(self, vertex_slice):
        """ Delegate to the synapse manager; whether data for this slice is\
            generated on the machine.
        """
        return self._synapse_manager.gen_on_machine(vertex_slice)