class SynapseTypeDelta(AbstractSynapseType):
    """ A synapse type with two delta-shaped (instantaneous) synapses:
        one excitatory and one inhibitory.
    """
    __slots__ = ["_data"]

    def __init__(self, n_neurons, initial_input_exc, initial_input_inh):
        """
        :param n_neurons: number of neurons this synapse data covers
        :param initial_input_exc: initial excitatory input per neuron
        :param initial_input_inh: initial inhibitory input per neuron
        """
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[INITIAL_INPUT_EXC] = initial_input_exc
        self._data[INITIAL_INPUT_INH] = initial_input_inh

    @overrides(AbstractSynapseType.get_n_synapse_types)
    def get_n_synapse_types(self):
        return 2

    @overrides(AbstractSynapseType.get_synapse_id_by_target)
    def get_synapse_id_by_target(self, target):
        # Map the PyNN receptor-type name to its synapse index;
        # None signals an unknown target.
        if target == "excitatory":
            return 0
        elif target == "inhibitory":
            return 1
        return None

    @overrides(AbstractSynapseType.get_synapse_targets)
    def get_synapse_targets(self):
        return "excitatory", "inhibitory"

    @overrides(AbstractSynapseType.get_n_synapse_type_parameters)
    def get_n_synapse_type_parameters(self):
        return 2

    @overrides(AbstractSynapseType.get_synapse_type_parameters)
    def get_synapse_type_parameters(self):
        return [
            NeuronParameter(self._data[INITIAL_INPUT_EXC], DataType.S1615),
            NeuronParameter(self._data[INITIAL_INPUT_INH], DataType.S1615)
        ]

    @overrides(AbstractSynapseType.get_synapse_type_parameter_types)
    def get_synapse_type_parameter_types(self):
        # BUG FIX: previously returned [], disagreeing with the two
        # parameters declared above; sibling types (e.g. the threshold
        # and input types in this codebase) return one data type per
        # declared parameter.
        return [DataType.S1615, DataType.S1615]

    @overrides(AbstractSynapseType.get_n_cpu_cycles_per_neuron)
    def get_n_cpu_cycles_per_neuron(self):
        # Delta synapses need no per-timestep decay computation
        return 0

    @property
    def isyn_exc(self):
        return self._data[INITIAL_INPUT_EXC]

    @isyn_exc.setter
    def isyn_exc(self, new_value):
        self._data.set_value(key=INITIAL_INPUT_EXC, value=new_value)

    @property
    def isyn_inh(self):
        return self._data[INITIAL_INPUT_INH]

    @isyn_inh.setter
    def isyn_inh(self, new_value):
        self._data.set_value(key=INITIAL_INPUT_INH, value=new_value)
# 예제 #2 (Example #2)
    def __init__(self, n_neurons, e_rev_E, e_rev_I):
        """ Store the reversal potentials (in mV) for each neuron. """
        # Unit labels, one per parameter key
        self._units = {E_REV_E: "mV", E_REV_I: "mV"}

        self._n_neurons = n_neurons
        data = SpynnakerRangeDictionary(size=n_neurons)
        data[E_REV_E] = e_rev_E
        data[E_REV_I] = e_rev_I
        self._data = data
# 예제 #3 (Example #3)
class InputTypeConductance(AbstractInputType, AbstractContainsUnits):
    """ The conductance-based input type.
    """
    # NOTE: _CONDUCTANTCE_TYPES is the (misspelled) module-level enum name
    __slots__ = ["_data", "_n_neurons", "_units"]

    def __init__(self, n_neurons, e_rev_E, e_rev_I):
        """
        :param n_neurons: the number of neurons
        :param e_rev_E: excitatory reversal potential (mV)
        :param e_rev_I: inhibitory reversal potential (mV)
        """
        self._units = {E_REV_E: "mV", E_REV_I: "mV"}
        self._n_neurons = n_neurons
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[E_REV_E] = e_rev_E
        self._data[E_REV_I] = e_rev_I

    @property
    def e_rev_E(self):
        """ The excitatory reversal potential. """
        return self._data[E_REV_E]

    @e_rev_E.setter
    def e_rev_E(self, e_rev_E):
        self._data.set_value(key=E_REV_E, value=e_rev_E)

    @property
    def e_rev_I(self):
        """ The inhibitory reversal potential. """
        return self._data[E_REV_I]

    @e_rev_I.setter
    def e_rev_I(self, e_rev_I):
        self._data.set_value(key=E_REV_I, value=e_rev_I)

    def get_global_weight_scale(self):
        # Conductance weights are scaled up before use on the machine
        return 1024.0

    def get_n_input_type_parameters(self):
        return 2

    def get_input_type_parameters(self):
        # One NeuronParameter per reversal potential, in enum order
        return [
            NeuronParameter(self._data[key], enum_item.data_type)
            for key, enum_item in (
                (E_REV_E, _CONDUCTANTCE_TYPES.E_REV_E),
                (E_REV_I, _CONDUCTANTCE_TYPES.E_REV_I))
        ]

    def get_input_type_parameter_types(self):
        return [item.data_type for item in _CONDUCTANTCE_TYPES]

    def get_n_cpu_cycles_per_neuron(self, n_synapse_types):
        return 10

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        return self._units[variable]
    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """ Set up the vertex: counters, spike buffering, recording,
            synapse handling and profiling.

        :param n_neurons: the number of neurons in the population
        :param label: the label of the population
        :param constraints: placement constraints, if any
        :param max_atoms_per_core: maximum neurons per SpiNNaker core
        :param spikes_per_second: expected spike rate, or None
        :param ring_buffer_sigma: SDs above the mean for the ring buffer
            upper bound, or None
        :param incoming_spike_buffer_size: incoming spike buffer size, or
            None to read the configured default
        :param neuron_impl: the neuron implementation object
        :param pynn_model: the PyNN model this vertex works on behalf of
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        self.__n_atoms = n_neurons
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # buffer data
        self.__incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # Fall back to the configured default when no size was given
        if incoming_spike_buffer_size is None:
            self.__incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self.__neuron_impl = neuron_impl
        self.__pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        # Let the neuron implementation fill in its own parameter and
        # state-variable entries
        self.__neuron_impl.add_parameters(self._parameters)
        self.__neuron_impl.add_state_variables(self._state_variables)
        self.__initial_state_variables = None
        self.__updated_state_variables = set()

        # Set up for recording
        recordable_variables = list(
            self.__neuron_impl.get_recordable_variables())
        record_data_types = dict(
            self.__neuron_impl.get_recordable_data_types())
        self.__neuron_recorder = NeuronRecorder(recordable_variables,
                                                record_data_types,
                                                [NeuronRecorder.SPIKES],
                                                n_neurons)

        # Set up synapse handling
        self.__synapse_manager = SynapticManager(
            self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self.__change_requires_mapping = True
        self.__change_requires_neuron_parameters_reload = False
        self.__change_requires_data_generation = False
        self.__has_reset_last = True

        # Set up for profiling
        self.__n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")
    def __init__(self, n_neurons, du_th, tau_th, v_thresh):
        """ Store the stochastic-threshold parameters, caching the
            reciprocals of du_th and tau_th as used at runtime.
        """
        self._n_neurons = n_neurons

        def reciprocal(x):
            return 1.0 / x

        data = SpynnakerRangeDictionary(size=n_neurons)
        data[DU_TH] = du_th
        data[DU_TH_INV] = data[DU_TH].apply_operation(reciprocal)
        data[TAU_TH] = tau_th
        data[TAU_TH_INV] = data[TAU_TH].apply_operation(reciprocal)
        data[V_THRESH] = v_thresh
        self._data = data
# 예제 #6 (Example #6)
 def test_uniform(self):
     """ A uniform RandomDistribution over 10 neurons yields 10 ranges. """
     # Need to do setup to get a pynn version
     p.setup(10)
     ranged = SpynnakerRangeDictionary(10)
     ranged["a"] = RandomDistribution(
         "uniform", parameters_pos=[-65.0, -55.0])
     assert len(ranged["a"].get_ranges()) == 10
    def __init__(
            self, n_neurons, label, constraints, max_atoms_per_core,
            spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size,
            neuron_impl, pynn_model):
        """ Set up the vertex: counters, spike buffering, recording,
            synapse handling and profiling.

        :param n_neurons: the number of neurons in the population
        :param label: the label of the population
        :param constraints: placement constraints, if any
        :param max_atoms_per_core: maximum neurons per SpiNNaker core
        :param spikes_per_second: expected spike rate, or None
        :param ring_buffer_sigma: SDs above the mean for the ring buffer
            upper bound, or None
        :param incoming_spike_buffer_size: incoming spike buffer size, or
            None to read the configured default
        :param neuron_impl: the neuron implementation object
        :param pynn_model: the PyNN model this vertex works on behalf of
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(
            label, constraints, max_atoms_per_core)

        self._n_atoms = n_neurons
        self._n_subvertices = 0
        self._n_data_specs = 0

        # buffer data
        self._incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # Fall back to the configured default when no size was given
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self._neuron_impl = neuron_impl
        self._pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        # Let the neuron implementation fill in its own parameter and
        # state-variable entries
        self._neuron_impl.add_parameters(self._parameters)
        self._neuron_impl.add_state_variables(self._state_variables)

        # Set up for recording
        recordables = ["spikes"]
        recordables.extend(self._neuron_impl.get_recordable_variables())
        self._neuron_recorder = NeuronRecorder(recordables, n_neurons)

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            self._neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Set up for profiling
        self._n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")
# 예제 #8 (Example #8)
class ThresholdTypeStatic(AbstractThresholdType, AbstractContainsUnits):
    """ A threshold that is a fixed voltage value.
    """
    __slots__ = [
        "_data",
        "_n_neurons",
        "_units"]

    def __init__(self, n_neurons, v_thresh):
        """
        :param n_neurons: the number of neurons
        :param v_thresh: the threshold voltage (mV)
        """
        self._units = {V_THRESH: "mV"}
        self._n_neurons = n_neurons
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[V_THRESH] = v_thresh

    @property
    def v_thresh(self):
        """ The threshold voltage. """
        return self._data[V_THRESH]

    @v_thresh.setter
    def v_thresh(self, v_thresh):
        self._data.set_value(key=V_THRESH, value=v_thresh)

    @overrides(AbstractThresholdType.get_n_threshold_parameters)
    def get_n_threshold_parameters(self):
        # A single parameter: the threshold voltage itself
        return 1

    @overrides(AbstractThresholdType.get_threshold_parameters)
    def get_threshold_parameters(self):
        v_thresh_param = NeuronParameter(
            self._data[V_THRESH], _STATIC_TYPES.V_THRESH.data_type)
        return [v_thresh_param]

    @overrides(AbstractThresholdType.get_threshold_parameter_types)
    def get_threshold_parameter_types(self):
        return [item.data_type for item in _STATIC_TYPES]

    @overrides(AbstractThresholdType.get_n_cpu_cycles_per_neuron)
    def get_n_cpu_cycles_per_neuron(self):
        # A simple comparison; allow 2 cycles to be safe
        return 2

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        return self._units[variable]
    def __init__(self, n_neurons, tau_syn_E, tau_syn_E2, tau_syn_I,
                 initial_input_exc, initial_input_exc2, initial_input_inh):
        """ Store the time constants and initial inputs of a dual-
            excitatory synapse type.

        :param n_neurons: the number of neurons
        :param tau_syn_E: first excitatory decay time constant (ms)
        :param tau_syn_E2: second excitatory decay time constant (ms)
        :param tau_syn_I: inhibitory decay time constant (ms)
        :param initial_input_exc: initial first excitatory input
        :param initial_input_exc2: initial second excitatory input
        :param initial_input_inh: initial inhibitory input
        """
        # pylint: disable=too-many-arguments
        # BUG FIX: synaptic decay time constants are times, so their
        # unit is "ms"; they were previously mislabelled "mV".
        self._units = {
            TAU_SYN_E: "ms",
            TAU_SYN_E2: "ms",
            TAU_SYN_I: "ms",
            GSYN_EXC: "uS",
            GSYN_INH: "uS"
        }

        self._n_neurons = n_neurons
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[TAU_SYN_E] = tau_syn_E
        self._data[TAU_SYN_E2] = tau_syn_E2
        self._data[TAU_SYN_I] = tau_syn_I
        self._data[INITIAL_INPUT_EXC] = initial_input_exc
        self._data[INITIAL_INPUT_EXC2] = initial_input_exc2
        self._data[INITIAL_INPUT_INH] = initial_input_inh
# 예제 #10 (Example #10)
    def __init__(self, n_neurons, exc_response, exc_exp_response, tau_syn_E,
                 inh_response, inh_exp_response, tau_syn_I):
        """ Store the responses and time constants of a combined
            exponential synapse type.
        """
        # pylint: disable=too-many-arguments
        data = SpynnakerRangeDictionary(size=n_neurons)
        data[EXC_RESPONSE] = exc_response
        data[EXC_EXP_RESPONSE] = exc_exp_response
        data[TAU_SYN_E] = tau_syn_E
        data[INH_RESPONSE] = inh_response
        data[INH_EXP_RESPONSE] = inh_exp_response
        data[TAU_SYN_I] = tau_syn_I
        self._data = data

        # Flat numpy copies of the same values, one entry per neuron
        self._exc_response = convert_param_to_numpy(exc_response, n_neurons)
        self._exc_exp_response = convert_param_to_numpy(
            exc_exp_response, n_neurons)
        self._tau_syn_E = convert_param_to_numpy(tau_syn_E, n_neurons)
        self._inh_response = convert_param_to_numpy(inh_response, n_neurons)
        self._inh_exp_response = convert_param_to_numpy(
            inh_exp_response, n_neurons)
        self._tau_syn_I = convert_param_to_numpy(tau_syn_I, n_neurons)
# 예제 #11 (Example #11)
    def __init__(self, n_neurons, v_init, v_rest, tau_m, cm, i_offset):
        """ Store leaky-integrate parameters; v_init defaults to v_rest. """
        # pylint: disable=too-many-arguments
        self._units = {
            V_INIT: 'mV', V_REST: 'mV', TAU_M: 'ms', CM: 'nF',
            I_OFFSET: 'nA'}

        self._n_neurons = n_neurons
        # Start the membrane at rest unless told otherwise
        starting_v = v_rest if v_init is None else v_init
        data = SpynnakerRangeDictionary(size=n_neurons)
        data[V_INIT] = starting_v
        data[V_REST] = v_rest
        data[TAU_M] = tau_m
        data[CM] = cm
        data[I_OFFSET] = i_offset
        # Cache the membrane resistance R = tau_m / cm
        data["r_membrane"] = data[TAU_M] / data[CM]
        self._data = data
# 예제 #12 (Example #12)
    def __init__(self, n_neurons, a, b, c, d, v_init, u_init, i_offset):
        """ Store the Izhikevich model parameters for each neuron. """
        # pylint: disable=too-many-arguments
        self._units = {
            A: "ms", B: "ms", C: "mV", D: "mV/ms",
            V_INIT: "mV", U_INIT: "mV/ms", I_OFFSET: "nA"}

        self._n_neurons = n_neurons
        data = SpynnakerRangeDictionary(size=n_neurons)
        for key, value in (
                (A, a), (B, b), (C, c), (D, d),
                (V_INIT, v_init), (U_INIT, u_init), (I_OFFSET, i_offset)):
            data[key] = value
        self._data = data
# 예제 #13 (Example #13)
 def __copy_ranged_dict(source, merge=None, merge_keys=None):
     """ Copy a ranged dictionary; any key named in merge_keys takes its
         values from merge instead of source.
     """
     target = SpynnakerRangeDictionary(len(source))
     for key in source.keys():
         if merge_keys is not None and key in merge_keys:
             init_list = merge.get_list(key)
         else:
             init_list = source.get_list(key)
         copy_list = SpynnakerRangedList(len(source))
         for start, stop, value in init_list.iter_ranges():
             # Strings iterate but must be treated as scalars
             value_is_list = (hasattr(value, '__iter__')
                              and not isinstance(value, str))
             copy_list.set_value_by_slice(start, stop, value, value_is_list)
         target[key] = copy_list
     return target
 def __init__(self, n_neurons, initial_input_exc, initial_input_inh):
     """ Record the initial excitatory/inhibitory inputs per neuron. """
     data = SpynnakerRangeDictionary(size=n_neurons)
     data[INITIAL_INPUT_EXC] = initial_input_exc
     data[INITIAL_INPUT_INH] = initial_input_inh
     self._data = data
class ThresholdTypeMaassStochastic(AbstractThresholdType):
    """ A stochastic threshold.
    """
    __slots__ = ["_data", "_n_neurons"]

    def __init__(self, n_neurons, du_th, tau_th, v_thresh):
        """
        :param n_neurons: the number of neurons
        :param du_th: stochastic du parameter
        :param tau_th: stochastic time constant
        :param v_thresh: the threshold voltage
        """
        self._n_neurons = n_neurons

        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[DU_TH] = du_th
        # Cache the reciprocals, which are what get written to the machine
        self._data[DU_TH_INV] = self._data[DU_TH].apply_operation(
            lambda x: 1.0 / x)
        self._data[TAU_TH] = tau_th
        self._data[TAU_TH_INV] = self._data[TAU_TH].apply_operation(
            lambda x: 1.0 / x)
        self._data[V_THRESH] = v_thresh

    @property
    def v_thresh(self):
        return self._data[V_THRESH]

    @v_thresh.setter
    def v_thresh(self, v_thresh):
        self._data.set_value(key=V_THRESH, value=v_thresh)

    @property
    def du_th(self):
        return self._data[DU_TH]

    @du_th.setter
    def du_th(self, du_th):
        self._data.set_value(key=DU_TH, value=du_th)
        # BUG FIX: refresh the cached reciprocal; previously it kept the
        # value computed at construction time, so parameters written after
        # this setter used a stale 1/du_th.
        self._data[DU_TH_INV] = self._data[DU_TH].apply_operation(
            lambda x: 1.0 / x)

    @property
    def tau_th(self):
        return self._data[TAU_TH]

    @tau_th.setter
    def tau_th(self, tau_th):
        self._data.set_value(key=TAU_TH, value=tau_th)
        # BUG FIX: refresh the cached reciprocal (see du_th setter)
        self._data[TAU_TH_INV] = self._data[TAU_TH].apply_operation(
            lambda x: 1.0 / x)

    @property
    def _du_th_inv(self):
        return self._data[DU_TH_INV]

    @property
    def _tau_th_inv(self):
        return self._data[TAU_TH_INV]

    @overrides(AbstractThresholdType.get_n_threshold_parameters)
    def get_n_threshold_parameters(self):
        return 3

    @overrides(AbstractThresholdType.get_threshold_parameters)
    def get_threshold_parameters(self):
        return [
            NeuronParameter(self._data[DU_TH_INV],
                            _MAASS_TYPES.DU_TH.data_type),
            NeuronParameter(self._data[TAU_TH_INV],
                            _MAASS_TYPES.TAU_TH.data_type),
            NeuronParameter(self._data[V_THRESH],
                            _MAASS_TYPES.V_THRESH.data_type)
        ]

    @overrides(AbstractThresholdType.get_threshold_parameter_types)
    def get_threshold_parameter_types(self):
        return [item.data_type for item in _MAASS_TYPES]

    @overrides(AbstractThresholdType.get_n_cpu_cycles_per_neuron)
    def get_n_cpu_cycles_per_neuron(self):
        return 30
# 예제 #16 (Example #16)
class NeuronModelLeakyIntegrate(AbstractNeuronModel, AbstractContainsUnits):
    """ A leaky-integrate neuron model. """
    __slots__ = ["_data", "_n_neurons", "_units"]

    def __init__(self, n_neurons, v_init, v_rest, tau_m, cm, i_offset):
        """
        :param n_neurons: the number of neurons
        :param v_init: initial membrane voltage (mV); None means v_rest
        :param v_rest: resting membrane voltage (mV)
        :param tau_m: membrane time constant (ms)
        :param cm: membrane capacitance (nF)
        :param i_offset: offset input current (nA)
        """
        # pylint: disable=too-many-arguments
        self._units = {
            V_INIT: 'mV',
            V_REST: 'mV',
            TAU_M: 'ms',
            CM: 'nF',
            I_OFFSET: 'nA'
        }

        self._n_neurons = n_neurons
        if v_init is None:
            v_init = v_rest
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[V_INIT] = v_init
        self._data[V_REST] = v_rest
        self._data[TAU_M] = tau_m
        self._data[CM] = cm
        self._data[I_OFFSET] = i_offset
        # CONSISTENCY FIX: use the R_MEMBRANE key constant (read by
        # _r_membrane and get_neural_parameters) rather than the raw
        # string "r_membrane"
        self._data[R_MEMBRANE] = self._data[TAU_M] / self._data[CM]

    def initialize_v(self, v_init):
        """ Set the membrane voltage state variable. """
        self._data.set_value(key=V_INIT, value=v_init)

    @property
    def v_init(self):
        return self._data[V_INIT]

    @v_init.setter
    def v_init(self, v_init):
        self._data.set_value(key=V_INIT, value=v_init)

    @property
    def v_rest(self):
        return self._data[V_REST]

    @v_rest.setter
    def v_rest(self, v_rest):
        self._data.set_value(key=V_REST, value=v_rest)

    @property
    def tau_m(self):
        return self._data[TAU_M]

    @tau_m.setter
    def tau_m(self, tau_m):
        self._data.set_value(key=TAU_M, value=tau_m)
        # BUG FIX: keep the cached membrane resistance R = tau_m / cm in
        # step when tau_m changes after construction
        self._data[R_MEMBRANE] = self._data[TAU_M] / self._data[CM]

    @property
    def cm(self):
        return self._data[CM]

    @cm.setter
    def cm(self, cm):
        self._data.set_value(key=CM, value=cm)
        # BUG FIX: keep the cached membrane resistance R = tau_m / cm in
        # step when cm changes after construction
        self._data[R_MEMBRANE] = self._data[TAU_M] / self._data[CM]

    @property
    def i_offset(self):
        return self._data[I_OFFSET]

    @i_offset.setter
    def i_offset(self, i_offset):
        self._data.set_value(key=I_OFFSET, value=i_offset)

    @property
    def _r_membrane(self):
        return self._data[R_MEMBRANE]

    def _exp_tc(self, machine_time_step):
        # Closed-form decay multiplier exp(-dt / tau_m), with dt converted
        # from microseconds to milliseconds
        return self._data[TAU_M].apply_operation(operation=lambda x: numpy.exp(
            float(-machine_time_step) / (1000.0 * x)))

    @overrides(AbstractNeuronModel.get_n_neural_parameters)
    def get_n_neural_parameters(self):
        return 5

    @inject_items({"machine_time_step": "MachineTimeStep"})
    @overrides(AbstractNeuronModel.get_neural_parameters,
               additional_arguments={'machine_time_step'})
    def get_neural_parameters(self, machine_time_step):
        # pylint: disable=arguments-differ
        return [

            # membrane voltage [mV]
            # REAL     V_membrane;
            NeuronParameter(self._data[V_INIT], _IF_TYPES.V_INIT.data_type),

            # membrane resting voltage [mV]
            # REAL     V_rest;
            NeuronParameter(self._data[V_REST], _IF_TYPES.V_REST.data_type),

            # membrane resistance [MOhm]
            # REAL     R_membrane;
            NeuronParameter(self._data[R_MEMBRANE],
                            _IF_TYPES.R_MEMBRANE.data_type),

            # 'fixed' computation parameter - time constant multiplier for
            # closed-form solution
            # exp( -(machine time step in ms)/(R * C) ) [.]
            # REAL     exp_TC;
            NeuronParameter(self._exp_tc(machine_time_step),
                            _IF_TYPES.EXP_TC.data_type),

            # offset current [nA]
            # REAL     I_offset;
            NeuronParameter(self._data[I_OFFSET], _IF_TYPES.I_OFFSET.data_type)
        ]

    @overrides(AbstractNeuronModel.get_neural_parameter_types)
    def get_neural_parameter_types(self):
        return [item.data_type for item in _IF_TYPES]

    @overrides(AbstractNeuronModel.get_n_global_parameters)
    def get_n_global_parameters(self):
        return 0

    @overrides(AbstractNeuronModel.get_global_parameters)
    def get_global_parameters(self):
        return []

    @overrides(AbstractNeuronModel.get_global_parameter_types)
    def get_global_parameter_types(self):
        return []

    @overrides(AbstractNeuronModel.set_neural_parameters)
    def set_neural_parameters(self, neural_parameters, vertex_slice):
        # Only the membrane voltage (parameter 0) is read back
        self._data[V_INIT][vertex_slice.as_slice] = neural_parameters[0]

    def get_n_cpu_cycles_per_neuron(self):

        # A bit of a guess
        return 80

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        return self._units[variable]
class AbstractPopulationVertex(
        ApplicationVertex, AbstractGeneratesDataSpecification,
        AbstractHasAssociatedBinary, AbstractContainsUnits,
        AbstractSpikeRecordable, AbstractNeuronRecordable,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractProvidesIncomingPartitionConstraints,
        AbstractPopulationInitializable, AbstractPopulationSettable,
        AbstractChangableAfterRun, AbstractRewritesDataSpecification,
        AbstractReadParametersBeforeSet, AbstractAcceptsIncomingSynapses,
        ProvidesKeyToAtomMappingImpl, AbstractCanReset):
    """ Underlying vertex model for Neural Populations.
        Not actually abstract.
    """

    # Slots are name-mangled where private to this class; _parameters and
    # _state_variables are shared with AbstractPyNNModel
    __slots__ = [
        "__change_requires_mapping",
        "__change_requires_neuron_parameters_reload",
        "__change_requires_data_generation",
        "__incoming_spike_buffer_size",
        "__n_atoms",
        "__n_profile_samples",
        "__neuron_impl",
        "__neuron_recorder",
        "_parameters",  # See AbstractPyNNModel
        "__pynn_model",
        "_state_variables",  # See AbstractPyNNModel
        "__synapse_manager",
        "__time_between_requests",
        "__units",
        "__n_subvertices",
        "__n_data_specs",
        "__initial_state_variables",
        "__has_reset_last",
        "__updated_state_variables"
    ]

    #: recording region IDs
    _SPIKE_RECORDING_REGION = 0

    #: the size of the runtime SDP port data region
    _RUNTIME_SDP_PORT_SIZE = BYTES_PER_WORD

    #: The Buffer traffic type
    _TRAFFIC_IDENTIFIER = "BufferTraffic"

    # 7 elements before the start of global parameters
    # 1. random back off, 2. micro secs before spike, 3. has key, 4. key,
    # 5. n atoms, 6. n synapse types, 7. incoming spike buffer size.
    _BYTES_TILL_START_OF_GLOBAL_PARAMETERS = 7 * BYTES_PER_WORD

    # Count of all vertices of this type ever constructed
    _n_vertices = 0

    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """
        :param int n_neurons: The number of neurons in the population
        :param str label: The label on the population
        :param list(~pacman.model.constraints.AbstractConstraint) constraints:
            Constraints on where a population's vertices may be placed.
        :param int max_atoms_per_core:
            The maximum number of atoms (neurons) per SpiNNaker core.
        :param spikes_per_second: Expected spike rate
        :type spikes_per_second: float or None
        :param ring_buffer_sigma:
            How many SD above the mean to go for upper bound of ring buffer \
            size; a good starting choice is 5.0. Given length of simulation \
            we can set this for approximate number of saturation events.
        :type ring_buffer_sigma: float or None
        :param incoming_spike_buffer_size:
        :type incoming_spike_buffer_size: int or None
        :param AbstractNeuronImpl neuron_impl:
            The (Python side of the) implementation of the neurons themselves.
        :param AbstractPyNNNeuronModel pynn_model:
            The PyNN neuron model that this vertex is working on behalf of.
        """

        # pylint: disable=too-many-arguments, too-many-locals
        ApplicationVertex.__init__(self, label, constraints,
                                   max_atoms_per_core)

        self.__n_atoms = n_neurons
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # buffer data
        self.__incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # Fall back to the configured default when no size was given
        if incoming_spike_buffer_size is None:
            self.__incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self.__neuron_impl = neuron_impl
        self.__pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        # Let the neuron implementation fill in its own parameter and
        # state-variable entries
        self.__neuron_impl.add_parameters(self._parameters)
        self.__neuron_impl.add_state_variables(self._state_variables)
        self.__initial_state_variables = None
        self.__updated_state_variables = set()

        # Set up for recording
        recordable_variables = list(
            self.__neuron_impl.get_recordable_variables())
        record_data_types = dict(
            self.__neuron_impl.get_recordable_data_types())
        self.__neuron_recorder = NeuronRecorder(recordable_variables,
                                                record_data_types,
                                                [NeuronRecorder.SPIKES],
                                                n_neurons)

        # Set up synapse handling
        self.__synapse_manager = SynapticManager(
            self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self.__change_requires_mapping = True
        self.__change_requires_neuron_parameters_reload = False
        self.__change_requires_data_generation = False
        self.__has_reset_last = True

        # Set up for profiling
        self.__n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")

    @property
    @overrides(ApplicationVertex.n_atoms)
    def n_atoms(self):
        """ The number of atoms (neurons) in this vertex. """
        return self.__n_atoms

    @property
    def _neuron_recorder(self):  # for testing only
        """ The neuron recorder; exposed for tests only. """
        return self.__neuron_recorder

    @inject_items({
        "graph": "MemoryApplicationGraph",
        "machine_time_step": "MachineTimeStep"
    })
    @overrides(ApplicationVertex.get_resources_used_by_atoms,
               additional_arguments={"graph", "machine_time_step"})
    def get_resources_used_by_atoms(self, vertex_slice, graph,
                                    machine_time_step):
        """ Work out the resources needed by the given slice of atoms. """
        # pylint: disable=arguments-differ
        variable_sdram = self.__neuron_recorder.get_variable_sdram_usage(
            vertex_slice)
        constant_sdram = ConstantSDRAM(
            self._get_sdram_usage_for_atoms(
                vertex_slice, graph, machine_time_step))
        dtcm = DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice))
        cpu = CPUCyclesPerTickResource(
            self.get_cpu_usage_for_atoms(vertex_slice))

        # Bundle everything into a single resource container
        return ResourceContainer(
            sdram=variable_sdram + constant_sdram, dtcm=dtcm, cpu_cycles=cpu)

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        """ Whether changes since the last run require a new mapping. """
        return self.__change_requires_mapping

    @property
    @overrides(AbstractChangableAfterRun.requires_data_generation)
    def requires_data_generation(self):
        """ Whether changes since the last run require data regeneration. """
        return self.__change_requires_data_generation

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        # Clear the change flags once the changes have been acted upon
        self.__change_requires_mapping = False
        self.__change_requires_data_generation = False

    @overrides(ApplicationVertex.create_machine_vertex)
    def create_machine_vertex(self,
                              vertex_slice,
                              resources_required,
                              label=None,
                              constraints=None):
        # Track how many machine vertices this vertex has produced
        self.__n_subvertices += 1
        return PopulationMachineVertex(
            resources_required,
            self.__neuron_recorder.recorded_ids_by_slice(vertex_slice), label,
            constraints, self, vertex_slice)

    def get_cpu_usage_for_atoms(self, vertex_slice):
        """ Estimate the CPU cycles used per timestep by a slice of atoms.

        :param ~pacman.model.graphs.common.Slice vertex_slice:
        """
        n_atoms = vertex_slice.n_atoms
        cycles = _NEURON_BASE_N_CPU_CYCLES + _C_MAIN_BASE_N_CPU_CYCLES
        cycles += _NEURON_BASE_N_CPU_CYCLES_PER_NEURON * n_atoms
        cycles += self.__neuron_recorder.get_n_cpu_cycles(n_atoms)
        cycles += self.__neuron_impl.get_n_cpu_cycles(n_atoms)
        cycles += self.__synapse_manager.get_n_cpu_cycles()
        return cycles

    def get_dtcm_usage_for_atoms(self, vertex_slice):
        """ Estimate the DTCM used, in bytes, by a slice of atoms.

        :param ~pacman.model.graphs.common.Slice vertex_slice:
        """
        n_atoms = vertex_slice.n_atoms
        return sum((
            _NEURON_BASE_DTCM_USAGE_IN_BYTES,
            self.__neuron_impl.get_dtcm_usage_in_bytes(n_atoms),
            self.__neuron_recorder.get_dtcm_usage_in_bytes(vertex_slice),
            self.__synapse_manager.get_dtcm_usage_in_bytes()))

    def _get_sdram_usage_for_neuron_params(self, vertex_slice):
        """ Calculate the SDRAM usage for just the neuron parameters region.

        :param ~pacman.model.graphs.common.Slice vertex_slice:
            the slice of atoms.
        :return: The SDRAM required for the neuron region, in bytes
        :rtype: int
        """
        # Fixed header plus the neuron implementation's per-atom data
        return (
            self._BYTES_TILL_START_OF_GLOBAL_PARAMETERS +
            self.__neuron_impl.get_sdram_usage_in_bytes(vertex_slice.n_atoms))

    def _get_sdram_usage_for_atoms(self, vertex_slice, graph,
                                   machine_time_step):
        """ Estimate the total SDRAM, in bytes, needed by a slice of atoms. """
        parts = (
            SYSTEM_BYTES_REQUIREMENT,
            self._get_sdram_usage_for_neuron_params(vertex_slice),
            self._neuron_recorder.get_static_sdram_usage(vertex_slice),
            PopulationMachineVertex.get_provenance_data_size(
                len(PopulationMachineVertex.EXTRA_PROVENANCE_DATA_ENTRIES)),
            self.__synapse_manager.get_sdram_usage_in_bytes(
                vertex_slice, machine_time_step, graph, self),
            profile_utils.get_profile_region_size(self.__n_profile_samples),
            bit_field_utilities.get_estimated_sdram_for_bit_field_region(
                graph, self),
            bit_field_utilities.get_estimated_sdram_for_key_region(
                graph, self),
            bit_field_utilities.exact_sdram_for_bit_field_builder_region())
        return sum(parts)

    def _reserve_memory_regions(self, spec, vertex_slice, vertex,
                                machine_graph, n_key_map):
        """ Reserve the DSG data regions.

        :param ~.DataSpecificationGenerator spec:
            the spec to write the DSG region to
        :param ~pacman.model.graphs.common.Slice vertex_slice:
            the slice of atoms from the application vertex
        :param ~.MachineVertex vertex: this vertex
        :param ~.MachineGraph machine_graph: machine graph
        :param n_key_map: nkey map
        :return: None
        """
        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=POPULATION_BASED_REGIONS.SYSTEM.value,
            size=common_constants.SIMULATION_N_BYTES,
            label='System')

        self._reserve_neuron_params_data_region(spec, vertex_slice)

        spec.reserve_memory_region(
            region=POPULATION_BASED_REGIONS.NEURON_RECORDING.value,
            size=self._neuron_recorder.get_static_sdram_usage(vertex_slice),
            label="neuron recording")

        # Region for profiler samples, sized by the configured sample count
        profile_utils.reserve_profile_region(
            spec, POPULATION_BASED_REGIONS.PROFILING.value,
            self.__n_profile_samples)

        # reserve bit field region
        bit_field_utilities.reserve_bit_field_regions(
            spec, machine_graph, n_key_map, vertex,
            POPULATION_BASED_REGIONS.BIT_FIELD_BUILDER.value,
            POPULATION_BASED_REGIONS.BIT_FIELD_FILTER.value,
            POPULATION_BASED_REGIONS.BIT_FIELD_KEY_MAP.value)

        vertex.reserve_provenance_data_region(spec)

    def _reserve_neuron_params_data_region(self, spec, vertex_slice):
        """ Reserve the neuron parameter data region.

        :param ~data_specification.DataSpecificationGenerator spec:
            the spec to write the DSG region to
        :param ~pacman.model.graphs.common.Slice vertex_slice:
            the slice of atoms from the application vertex
        :return: None
        """
        spec.reserve_memory_region(
            region=POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            size=self._get_sdram_usage_for_neuron_params(vertex_slice),
            label='NeuronParams')

    @staticmethod
    def __copy_ranged_dict(source, merge=None, merge_keys=None):
        """ Deep-copy a ranged dictionary, optionally taking the values of
            the keys listed in *merge_keys* from *merge* instead of *source*.
        """
        size = len(source)
        target = SpynnakerRangeDictionary(size)
        for key in source.keys():
            # Values in merge_keys come from the merge dictionary
            if merge_keys is not None and key in merge_keys:
                init_list = merge.get_list(key)
            else:
                init_list = source.get_list(key)
            fresh = SpynnakerRangedList(size)
            for start, stop, value in init_list.iter_ranges():
                # Strings are iterable but must be stored as scalars
                as_list = (hasattr(value, '__iter__')
                           and not isinstance(value, str))
                fresh.set_value_by_slice(start, stop, value, as_list)
            target[key] = fresh
        return target

    def _write_neuron_parameters(self, spec, key, vertex_slice,
                                 machine_time_step, time_scale_factor):
        """ Write the neuron parameters region, restoring initial state
            variables if a reset happened since the last write.

        Note: the write order below is the binary's expected layout —
        do not reorder.

        :param spec: the data specification to write to
        :param key: routing key for spikes, or None if none was allocated
        :param vertex_slice: the slice of atoms covered by this core
        :param machine_time_step: machine time step (microseconds)
        :param time_scale_factor: real-time slow-down factor
        """

        # If resetting, reset any state variables that need to be reset
        if (self.__has_reset_last
                and self.__initial_state_variables is not None):
            self._state_variables = self.__copy_ranged_dict(
                self.__initial_state_variables, self._state_variables,
                self.__updated_state_variables)
            self.__initial_state_variables = None

        # If no initial state variables, copy them now
        if self.__has_reset_last:
            self.__initial_state_variables = self.__copy_ranged_dict(
                self._state_variables)

        # Reset things that need resetting
        self.__has_reset_last = False
        self.__updated_state_variables.clear()

        # pylint: disable=too-many-arguments
        n_atoms = vertex_slice.n_atoms
        spec.comment(
            "\nWriting Neuron Parameters for {} Neurons:\n".format(n_atoms))

        # Set the focus to the memory region 2 (neuron parameters):
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)

        # Write the random back off value; each data spec gets a different
        # offset so cores do not all send at the same time
        max_offset = (machine_time_step *
                      time_scale_factor) // _MAX_OFFSET_DENOMINATOR
        spec.write_value(
            int(math.ceil(max_offset / self.__n_subvertices)) *
            self.__n_data_specs)
        self.__n_data_specs += 1

        # Write the number of microseconds between sending spikes
        time_between_spikes = ((machine_time_step * time_scale_factor) /
                               (n_atoms * 2.0))
        spec.write_value(data=int(time_between_spikes))

        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)

        # Write the number of synapse types
        spec.write_value(data=self.__neuron_impl.get_n_synapse_types())

        # Write the size of the incoming spike buffer
        spec.write_value(data=self.__incoming_spike_buffer_size)

        # Write the neuron parameters themselves (serialised by the impl)
        neuron_data = self.__neuron_impl.get_data(self._parameters,
                                                  self._state_variables,
                                                  vertex_slice)
        spec.write_array(neuron_data)

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "routing_info": "MemoryRoutingInfos"
    })
    @overrides(AbstractRewritesDataSpecification.regenerate_data_specification,
               additional_arguments={
                   "machine_time_step", "time_scale_factor", "routing_info"
               })
    def regenerate_data_specification(self, spec, placement, machine_time_step,
                                      time_scale_factor, routing_info):
        """ Rewrite only the neuron parameters region for this placement.

        :param machine_time_step: (injected)
        :param time_scale_factor: (injected)
        :param routing_info: (injected)
        """
        # pylint: disable=too-many-arguments, arguments-differ
        atoms = placement.vertex.vertex_slice

        # Re-reserve and rewrite the neuron-parameters region
        self._reserve_neuron_params_data_region(spec, atoms)
        spike_key = routing_info.get_first_key_from_pre_vertex(
            placement.vertex, constants.SPIKE_PARTITION_ID)
        self._write_neuron_parameters(
            spec, spike_key, atoms, machine_time_step, time_scale_factor)

        # close spec
        spec.end_specification()

    @overrides(AbstractRewritesDataSpecification.
               requires_memory_regions_to_be_reloaded)
    def requires_memory_regions_to_be_reloaded(self):
        """ Whether the neuron parameters region needs rewriting. """
        needs_reload = self.__change_requires_neuron_parameters_reload
        return needs_reload

    @overrides(AbstractRewritesDataSpecification.mark_regions_reloaded)
    def mark_regions_reloaded(self):
        """ Note that the neuron parameters have been rewritten. """
        self.__change_requires_neuron_parameters_reload = False

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "application_graph": "MemoryApplicationGraph",
        "machine_graph": "MemoryMachineGraph",
        "routing_info": "MemoryRoutingInfos",
        "data_n_time_steps": "DataNTimeSteps",
        "n_key_map": "MemoryMachinePartitionNKeysMap"
    })
    @overrides(AbstractGeneratesDataSpecification.generate_data_specification,
               additional_arguments={
                   "machine_time_step", "time_scale_factor",
                   "application_graph", "machine_graph", "routing_info",
                   "data_n_time_steps", "n_key_map"
               })
    def generate_data_specification(self, spec, placement, machine_time_step,
                                    time_scale_factor, application_graph,
                                    machine_graph, routing_info,
                                    data_n_time_steps, n_key_map):
        """ Generate the full data specification for one machine vertex:
            reserves all regions, then writes system, recording, neuron,
            profile, synaptic and bit-field data in that order.

        :param machine_time_step: (injected)
        :param time_scale_factor: (injected)
        :param application_graph: (injected)
        :param machine_graph: (injected)
        :param routing_info: (injected)
        :param data_n_time_steps: (injected)
        :param n_key_map: (injected)
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex = placement.vertex

        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self.__neuron_impl.model_name))
        vertex_slice = vertex.vertex_slice

        # Reserve memory regions
        self._reserve_memory_regions(spec, vertex_slice, vertex, machine_graph,
                                     n_key_map)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key for outgoing spikes
        key = routing_info.get_first_key_from_pre_vertex(
            vertex, constants.SPIKE_PARTITION_ID)

        # Write the setup region
        spec.switch_write_focus(POPULATION_BASED_REGIONS.SYSTEM.value)
        spec.write_array(
            simulation_utilities.get_simulation_header_array(
                self.get_binary_file_name(), machine_time_step,
                time_scale_factor))

        # Write the neuron recording region
        self._neuron_recorder.write_neuron_recording_region(
            spec, POPULATION_BASED_REGIONS.NEURON_RECORDING.value,
            vertex_slice, data_n_time_steps)

        # Write the neuron parameters
        self._write_neuron_parameters(spec, key, vertex_slice,
                                      machine_time_step, time_scale_factor)

        # write profile data
        profile_utils.write_profile_region_data(
            spec, POPULATION_BASED_REGIONS.PROFILING.value,
            self.__n_profile_samples)

        # Get the weight_scale value from the appropriate location
        weight_scale = self.__neuron_impl.get_global_weight_scale()

        # allow the synaptic matrix to write its data spec-able data
        self.__synapse_manager.write_data_spec(spec, self, vertex_slice,
                                               vertex, placement,
                                               machine_graph,
                                               application_graph, routing_info,
                                               weight_scale, machine_time_step)
        # record how much of the matrix is generated on-chip vs on host
        vertex.set_on_chip_generatable_area(
            self.__synapse_manager.host_written_matrix_size,
            self.__synapse_manager.on_chip_written_matrix_size)

        # write up the bitfield builder data
        bit_field_utilities.write_bitfield_init_data(
            spec, vertex, machine_graph, routing_info, n_key_map,
            POPULATION_BASED_REGIONS.BIT_FIELD_BUILDER.value,
            POPULATION_BASED_REGIONS.POPULATION_TABLE.value,
            POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value,
            POPULATION_BASED_REGIONS.DIRECT_MATRIX.value,
            POPULATION_BASED_REGIONS.BIT_FIELD_FILTER.value,
            POPULATION_BASED_REGIONS.BIT_FIELD_KEY_MAP.value,
            POPULATION_BASED_REGIONS.STRUCTURAL_DYNAMICS.value,
            isinstance(self.__synapse_manager.synapse_dynamics,
                       AbstractSynapseDynamicsStructural))

        # End the writing of this specification:
        spec.end_specification()

    @overrides(AbstractHasAssociatedBinary.get_binary_file_name)
    def get_binary_file_name(self):
        """ Build the binary name: the neuron implementation's binary with
            the synapse manager's executable suffix inserted before the
            extension.
        """
        root, extension = os.path.splitext(self.__neuron_impl.binary_name)
        suffix = self.__synapse_manager.vertex_executable_suffix
        return root + suffix + extension

    @overrides(AbstractHasAssociatedBinary.get_binary_start_type)
    def get_binary_start_type(self):
        """ This binary runs under the simulation interface. """
        start_type = ExecutableType.USES_SIMULATION_INTERFACE
        return start_type

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        """ Whether spike recording is currently enabled. """
        recorder = self.__neuron_recorder
        return recorder.is_recording(NeuronRecorder.SPIKES)

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(self, new_state=True, sampling_interval=None,
                             indexes=None):
        """ Enable or disable spike recording via the generic mechanism. """
        self.set_recording(
            NeuronRecorder.SPIKES, new_state=new_state,
            sampling_interval=sampling_interval, indexes=indexes)

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(self, placements, buffer_manager, machine_time_step):
        """ Read back the recorded spikes from the machine. """
        # Spikes are recorded in the region after all other variables
        spike_region = len(self.__neuron_impl.get_recordable_variables())
        return self.__neuron_recorder.get_spikes(
            self.label, buffer_manager, spike_region, placements, self,
            NeuronRecorder.SPIKES, machine_time_step)

    @overrides(AbstractNeuronRecordable.get_recordable_variables)
    def get_recordable_variables(self):
        """ The names of variables that can be recorded. """
        recorder = self.__neuron_recorder
        return recorder.get_recordable_variables()

    @overrides(AbstractNeuronRecordable.is_recording)
    def is_recording(self, variable):
        """ Whether the given variable is being recorded. """
        recorder = self.__neuron_recorder
        return recorder.is_recording(variable)

    @overrides(AbstractNeuronRecordable.set_recording)
    def set_recording(self, variable, new_state=True, sampling_interval=None,
                      indexes=None):
        """ Turn recording of a variable on or off. """
        # Starting to record a previously-unrecorded variable needs remapping
        self.__change_requires_mapping = not self.is_recording(variable)
        self.__neuron_recorder.set_recording(
            variable, new_state, sampling_interval, indexes)

    @overrides(AbstractNeuronRecordable.get_data)
    def get_data(self, variable, n_machine_time_steps, placements,
                 buffer_manager, machine_time_step):
        """ Read back recorded matrix data for a variable. """
        # pylint: disable=too-many-arguments
        region = self.__neuron_impl.get_recordable_variable_index(variable)
        return self.__neuron_recorder.get_matrix_data(
            self.label, buffer_manager, region, placements, self, variable,
            n_machine_time_steps)

    @overrides(AbstractNeuronRecordable.get_neuron_sampling_interval)
    def get_neuron_sampling_interval(self, variable):
        """ The sampling interval used when recording the given variable. """
        recorder = self.__neuron_recorder
        return recorder.get_neuron_sampling_interval(variable)

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        """ The sampling interval used when recording spikes. """
        # Use the shared constant instead of a bare "spikes" literal, for
        # consistency with the other spike-recording methods in this class
        return self.__neuron_recorder.get_neuron_sampling_interval(
            NeuronRecorder.SPIKES)

    @overrides(AbstractPopulationInitializable.initialize)
    def initialize(self, variable, value):
        """ Set the initial value of a state variable; only legal before
            the first run, or after a reset and before the next run.
        """
        if not self.__has_reset_last:
            raise Exception(
                "initialize can only be called before the first call to run, "
                "or before the first call to run after a reset")
        state_vars = self._state_variables
        if variable not in state_vars:
            raise KeyError("Vertex does not support initialisation of"
                           " parameter {}".format(variable))
        state_vars.set_value(variable, value)
        # Track the change so it survives reset restoration and gets reloaded
        self.__updated_state_variables.add(variable)
        self.__change_requires_neuron_parameters_reload = True

    @property
    def initialize_parameters(self):
        """ The names of parameters that have default initial values.

        :rtype: iterable(str)
        """
        defaults = self.__pynn_model.default_initial_values
        return defaults.keys()

    def _get_parameter(self, variable):
        """ Map a user-facing variable name onto the name actually stored in
            the state variables, tolerating the presence or absence of the
            "_init" suffix on either side.
        """
        if variable.endswith("_init"):
            # Called with e.g. "v_init"
            stripped = variable[:-5]
            if variable in self._state_variables:
                return variable
            if stripped in self._state_variables:
                # The neuron defines "v" rather than "v_init"
                return stripped
        else:
            # Called with e.g. "v"
            suffixed = variable + "_init"
            if suffixed in self._state_variables:
                return suffixed
            if variable in self._state_variables:
                return variable

        # parameter not found for this variable
        raise KeyError("No variable {} found in {}".format(
            variable, self.__neuron_impl.model_name))

    @overrides(AbstractPopulationInitializable.get_initial_value)
    def get_initial_value(self, variable, selector=None):
        """ Get the initial value(s) of a state variable, optionally for a
            subset of neurons selected by *selector*.
        """
        values = self._state_variables[self._get_parameter(variable)]
        if selector is not None:
            return values.get_values(selector)
        return values

    @overrides(AbstractPopulationInitializable.set_initial_value)
    def set_initial_value(self, variable, value, selector=None):
        """ Set the initial value of a state variable, optionally for a
            subset of neurons selected by *selector*.
        """
        # NOTE(review): this rejects names that _get_parameter below would
        # happily map (e.g. "v_init" when only "v" is stored), which is
        # inconsistent with get_initial_value — confirm this is intended
        if variable not in self._state_variables:
            raise KeyError("Vertex does not support initialisation of"
                           " parameter {}".format(variable))

        parameter = self._get_parameter(variable)
        ranged_list = self._state_variables[parameter]
        ranged_list.set_value_by_selector(selector, value)
        # Changed initial values must be rewritten to the machine
        self.__change_requires_neuron_parameters_reload = True

    @property
    def conductance_based(self):
        """ Whether the neuron model uses conductance-based synapses.

        :rtype: bool
        """
        impl = self.__neuron_impl
        return impl.is_conductance_based

    @overrides(AbstractPopulationSettable.get_value)
    def get_value(self, key):
        """ Get a property of the overall model. """
        if key in self._parameters:
            return self._parameters[key]
        raise InvalidParameterType(
            "Population {} does not have parameter {}".format(
                self.__neuron_impl.model_name, key))

    @overrides(AbstractPopulationSettable.set_value)
    def set_value(self, key, value):
        """ Set a property of the overall model. """
        if key not in self._parameters:
            raise InvalidParameterType(
                "Population {} does not have parameter {}".format(
                    self.__neuron_impl.model_name, key))
        self._parameters.set_value(key, value)
        # Changed parameters must be rewritten to the machine
        self.__change_requires_neuron_parameters_reload = True

    @overrides(AbstractReadParametersBeforeSet.read_parameters_from_machine)
    def read_parameters_from_machine(self, transceiver, placement,
                                     vertex_slice):
        """ Read the current neuron parameters back from SDRAM and update the
            Python-side parameter and state-variable stores.
        """
        # Base address of the neuron-parameters region on this core
        region_base = helpful_functions.locate_memory_region_for_placement(
            placement, POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            transceiver)

        # Skip the header words that precede the per-neuron parameters
        params_address = (
            region_base + self._BYTES_TILL_START_OF_GLOBAL_PARAMETERS)
        params_size = (
            self._get_sdram_usage_for_neuron_params(vertex_slice) -
            self._BYTES_TILL_START_OF_GLOBAL_PARAMETERS)

        # Pull the raw bytes from the machine
        raw_data = transceiver.read_memory(
            placement.x, placement.y, params_address, params_size)

        # Decode into the Python parameter/state-variable stores
        self.__neuron_impl.read_data(
            raw_data, 0, vertex_slice, self._parameters,
            self._state_variables)

    @property
    def weight_scale(self):
        """ The global weight scale of the neuron implementation.

        :rtype: float
        """
        impl = self.__neuron_impl
        return impl.get_global_weight_scale()

    @property
    def ring_buffer_sigma(self):
        """ Ring buffer sigma, delegated to the synapse manager. """
        return self.__synapse_manager.ring_buffer_sigma

    @ring_buffer_sigma.setter
    def ring_buffer_sigma(self, ring_buffer_sigma):
        """ Forward the new sigma to the synapse manager. """
        self.__synapse_manager.ring_buffer_sigma = ring_buffer_sigma

    def reset_ring_buffer_shifts(self):
        """ Reset the ring buffer shifts held by the synapse manager. """
        self.__synapse_manager.reset_ring_buffer_shifts()

    @property
    def spikes_per_second(self):
        """ Expected spike rate, delegated to the synapse manager. """
        return self.__synapse_manager.spikes_per_second

    @spikes_per_second.setter
    def spikes_per_second(self, spikes_per_second):
        """ Forward the new expected spike rate to the synapse manager. """
        self.__synapse_manager.spikes_per_second = spikes_per_second

    @property
    def synapse_dynamics(self):
        """ The synapse dynamics in use.

        :rtype: AbstractSynapseDynamics
        """
        manager = self.__synapse_manager
        return manager.synapse_dynamics

    def set_synapse_dynamics(self, synapse_dynamics):
        """ Set the synapse dynamics to use.

        :param AbstractSynapseDynamics synapse_dynamics:
        """
        self.__synapse_manager.synapse_dynamics = synapse_dynamics

    @overrides(AbstractAcceptsIncomingSynapses.add_pre_run_connection_holder)
    def add_pre_run_connection_holder(self, connection_holder, projection_edge,
                                      synapse_information):
        """ Register a connection holder to be filled before the run. """
        manager = self.__synapse_manager
        manager.add_pre_run_connection_holder(
            connection_holder, projection_edge, synapse_information)

    def get_connection_holders(self):
        """ The registered connection holders.

        :rtype: dict(tuple(ProjectionApplicationEdge,SynapseInformation),\
            ConnectionHolder)
        """
        manager = self.__synapse_manager
        return manager.get_connection_holders()

    @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine)
    def get_connections_from_machine(
            self, transceiver, placement, edge, routing_infos,
            synapse_information, machine_time_step,
            using_extra_monitor_cores, placements=None, monitor_api=None,
            fixed_routes=None, extra_monitor=None):
        """ Read back synaptic connections from the machine, delegating to
            the synapse manager.
        """
        # pylint: disable=too-many-arguments
        return self.__synapse_manager.get_connections_from_machine(
            transceiver, placement, edge, routing_infos, synapse_information,
            machine_time_step, using_extra_monitor_cores, placements,
            monitor_api, fixed_routes, extra_monitor)

    def clear_connection_cache(self):
        """ Flush the synapse manager's cached connection data. """
        self.__synapse_manager.clear_connection_cache()

    def get_maximum_delay_supported_in_ms(self, machine_time_step):
        """ The longest synaptic delay supported, in milliseconds. """
        manager = self.__synapse_manager
        return manager.get_maximum_delay_supported_in_ms(machine_time_step)

    @overrides(AbstractProvidesIncomingPartitionConstraints.
               get_incoming_partition_constraints)
    def get_incoming_partition_constraints(self, partition):
        """ Gets the constraints for partitions going into this vertex.

        :param partition: partition that goes into this vertex
        :return: list of constraints
        """
        # The same constraints apply regardless of the partition
        return self.__synapse_manager.get_incoming_partition_constraints()

    @overrides(AbstractProvidesOutgoingPartitionConstraints.
               get_outgoing_partition_constraints)
    def get_outgoing_partition_constraints(self, partition):
        """ Gets the constraints for partitions going out of this vertex.

        :param partition: the partition that leaves this vertex
        :return: list of constraints
        """
        # Outgoing keys must be a contiguous range, whatever the partition
        return [ContiguousKeyRangeContraint()]

    @overrides(AbstractNeuronRecordable.clear_recording)
    def clear_recording(self, variable, buffer_manager, placements):
        """ Clear the recorded data for one variable from the buffers. """
        # Spikes use the region index after all other recordable variables
        if variable == NeuronRecorder.SPIKES:
            index = len(self.__neuron_impl.get_recordable_variables())
        else:
            index = self.__neuron_impl.get_recordable_variable_index(variable)
        self._clear_recording_region(buffer_manager, placements, index)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements):
        """ Clear the recorded spike data from the buffers. """
        spike_region = len(self.__neuron_impl.get_recordable_variables())
        self._clear_recording_region(buffer_manager, placements, spike_region)

    def _clear_recording_region(self, buffer_manager, placements,
                                recording_region_id):
        """ Clear a recorded data region from the buffer manager.

        :param buffer_manager: the buffer manager object
        :param placements: the placements object
        :param recording_region_id: the recorded region ID for clearing
        :rtype: None
        """
        # Clear the region on every core this vertex was placed on
        for m_vertex in self.machine_vertices:
            where = placements.get_placement_of_vertex(m_vertex)
            buffer_manager.clear_recorded_data(
                where.x, where.y, where.p, recording_region_id)

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        """ Get the units of the given variable. """
        # Spikes have no physical units; the name is returned unchanged
        if variable == NeuronRecorder.SPIKES:
            return NeuronRecorder.SPIKES
        # Recordable state variables report their own units
        if self.__neuron_impl.is_recordable(variable):
            return self.__neuron_impl.get_recordable_units(variable)
        # Otherwise it must be a model parameter
        if variable not in self._parameters:
            raise Exception("Population {} does not have parameter {}".format(
                self.__neuron_impl.model_name, variable))
        return self.__neuron_impl.get_units(variable)

    def describe(self):
        """ Get a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see :py:mod:`pyNN.descriptions`).

        If template is None, then a dictionary containing the template context\
        will be returned.

        :rtype: dict(str, ...)
        """
        parameters = {
            name: self.get_value(name)
            for name in self.__pynn_model.default_parameters}

        return {
            "name": self.__neuron_impl.model_name,
            "default_parameters": self.__pynn_model.default_parameters,
            # Bug fix: this previously (and wrongly) reused
            # default_parameters instead of default_initial_values
            "default_initial_values":
                self.__pynn_model.default_initial_values,
            "parameters": parameters,
        }

    def get_synapse_id_by_target(self, target):
        """ Look up the synapse type index for a named target. """
        impl = self.__neuron_impl
        return impl.get_synapse_id_by_target(target)

    def __str__(self):
        """ Summary: label plus atom count. """
        return "{} with {} atoms".format(self.label, self.n_atoms)

    def __repr__(self):
        """ Same form as ``__str__``. """
        return self.__str__()

    def gen_on_machine(self, vertex_slice):
        """ True if the synapses of a particular slice of this population \
            should be generated on the machine.

        :param ~pacman.model.graphs.common.Slice vertex_slice:
        """
        manager = self.__synapse_manager
        return manager.gen_on_machine(vertex_slice)

    @overrides(AbstractCanReset.reset_to_first_timestep)
    def reset_to_first_timestep(self):
        """ Mark this vertex as reset so state variables get restored and
            rewritten before the next run.
        """
        self.__has_reset_last = True
        self.__change_requires_neuron_parameters_reload = True

        # Synapses that change during a run need full data regeneration,
        # which supersedes a plain neuron-parameter reload
        if self.__synapse_manager.changes_during_run:
            self.__change_requires_data_generation = True
            self.__change_requires_neuron_parameters_reload = False
    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """
        :param int n_neurons: The number of neurons in the population
        :param str label: The label on the population
        :param list(~pacman.model.constraints.AbstractConstraint) constraints:
            Constraints on where a population's vertices may be placed.
        :param int max_atoms_per_core:
            The maximum number of atoms (neurons) per SpiNNaker core.
        :param spikes_per_second: Expected spike rate
        :type spikes_per_second: float or None
        :param ring_buffer_sigma:
            How many SD above the mean to go for upper bound of ring buffer \
            size; a good starting choice is 5.0. Given length of simulation \
            we can set this for approximate number of saturation events.
        :type ring_buffer_sigma: float or None
        :param incoming_spike_buffer_size:
        :type incoming_spike_buffer_size: int or None
        :param AbstractNeuronImpl neuron_impl:
            The (Python side of the) implementation of the neurons themselves.
        :param AbstractPyNNNeuronModel pynn_model:
            The PyNN neuron model that this vertex is working on behalf of.
        """

        # pylint: disable=too-many-arguments, too-many-locals
        ApplicationVertex.__init__(self, label, constraints,
                                   max_atoms_per_core)

        self.__n_atoms = n_neurons
        # counters used to stagger per-core random back-off values
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # buffer data
        self.__incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # fall back to the configured default buffer size
        if incoming_spike_buffer_size is None:
            self.__incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self.__neuron_impl = neuron_impl
        self.__pynn_model = pynn_model
        # per-neuron parameter and state-variable stores
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        self.__neuron_impl.add_parameters(self._parameters)
        self.__neuron_impl.add_state_variables(self._state_variables)
        # snapshot of state variables for restoring after a reset
        self.__initial_state_variables = None
        self.__updated_state_variables = set()

        # Set up for recording
        # NOTE(review): this sets self.__neuron_recorder, but some methods
        # in this file read self._neuron_recorder — confirm which is current
        recordable_variables = list(
            self.__neuron_impl.get_recordable_variables())
        record_data_types = dict(
            self.__neuron_impl.get_recordable_data_types())
        self.__neuron_recorder = NeuronRecorder(recordable_variables,
                                                record_data_types,
                                                [NeuronRecorder.SPIKES],
                                                n_neurons)

        # Set up synapse handling
        self.__synapse_manager = SynapticManager(
            self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self.__change_requires_mapping = True
        self.__change_requires_neuron_parameters_reload = False
        self.__change_requires_data_generation = False
        self.__has_reset_last = True

        # Set up for profiling
        self.__n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")
# --- example boundary marker from source scrape (was: "예제 #19" / "0") ---
    def __init__(self, n_neurons, v_thresh):
        """ Build the threshold data for *n_neurons* neurons. """
        self._n_neurons = n_neurons
        # Threshold voltage is reported in millivolts
        self._units = {V_THRESH: "mV"}
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[V_THRESH] = v_thresh
# --- example boundary marker from source scrape (was: "예제 #20" / "0") ---
class SynapseTypeAlpha(AbstractSynapseType):
    """ A synapse type with alpha-shaped excitatory and inhibitory\
        post-synaptic responses, each held as a linear and an exponential\
        state component plus a time constant.
    """
    __slots__ = [
        "_data", "_exc_exp_response", "_exc_response", "_inh_exp_response",
        "_inh_response", "_tau_syn_E", "_tau_syn_I"
    ]

    def __init__(self, n_neurons, exc_response, exc_exp_response, tau_syn_E,
                 inh_response, inh_exp_response, tau_syn_I):
        """ Set up the synapse state for a population of neurons.

        :param n_neurons: number of neurons in the population
        :param exc_response: initial linear excitatory response
        :param exc_exp_response: initial exponential excitatory response
        :param tau_syn_E: excitatory synaptic time constant
        :param inh_response: initial linear inhibitory response
        :param inh_exp_response: initial exponential inhibitory response
        :param tau_syn_I: inhibitory synaptic time constant
        """
        # pylint: disable=too-many-arguments
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[EXC_RESPONSE] = exc_response
        self._data[EXC_EXP_RESPONSE] = exc_exp_response
        self._data[TAU_SYN_E] = tau_syn_E
        self._data[INH_RESPONSE] = inh_response
        self._data[INH_EXP_RESPONSE] = inh_exp_response
        self._data[TAU_SYN_I] = tau_syn_I

        # NOTE(review): these numpy copies are never read back by the
        # properties below (which use self._data); kept for compatibility
        self._exc_response = convert_param_to_numpy(exc_response, n_neurons)
        self._exc_exp_response = convert_param_to_numpy(
            exc_exp_response, n_neurons)
        self._tau_syn_E = convert_param_to_numpy(tau_syn_E, n_neurons)

        self._inh_response = convert_param_to_numpy(inh_response, n_neurons)
        self._inh_exp_response = convert_param_to_numpy(
            inh_exp_response, n_neurons)
        self._tau_syn_I = convert_param_to_numpy(tau_syn_I, n_neurons)

    @property
    def exc_response(self):
        """ The linear excitatory response value(s). """
        return self._data[EXC_RESPONSE]

    @exc_response.setter
    def exc_response(self, exc_response):
        self._data.set_value(key=EXC_RESPONSE, value=exc_response)

    @property
    def tau_syn_E(self):
        """ The excitatory synaptic time constant(s). """
        return self._data[TAU_SYN_E]

    @tau_syn_E.setter
    def tau_syn_E(self, tau_syn_E):
        self._data.set_value(key=TAU_SYN_E, value=tau_syn_E)

    @property
    def inh_response(self):
        """ The linear inhibitory response value(s). """
        return self._data[INH_RESPONSE]

    @inh_response.setter
    def inh_response(self, inh_response):
        self._data.set_value(key=INH_RESPONSE, value=inh_response)

    @property
    def tau_syn_I(self):
        """ The inhibitory synaptic time constant(s). """
        return self._data[TAU_SYN_I]

    @tau_syn_I.setter
    def tau_syn_I(self, tau_syn_I):
        self._data.set_value(key=TAU_SYN_I, value=tau_syn_I)

    @overrides(AbstractSynapseType.get_n_synapse_types)
    def get_n_synapse_types(self):
        return 2  # EX and IH

    @overrides(AbstractSynapseType.get_synapse_id_by_target)
    def get_synapse_id_by_target(self, target):
        """ Map a target name to a synapse index, or None if unknown. """
        if target == "excitatory":
            return 0
        elif target == "inhibitory":
            return 1
        return None

    @overrides(AbstractSynapseType.get_synapse_targets)
    def get_synapse_targets(self):
        return "excitatory", "inhibitory"

    @overrides(AbstractSynapseType.get_n_synapse_type_parameters)
    def get_n_synapse_type_parameters(self):
        # must match the length of the list from get_synapse_type_parameters
        return 8

    @inject_items({"machine_time_step": "MachineTimeStep"})
    def get_synapse_type_parameters(self, machine_time_step):
        """ Build the 8 per-neuron parameters written to the machine, in\
            the order expected by the C synapse implementation.
        """
        # pylint: disable=arguments-differ
        e_decay, _ = get_exponential_decay_and_init(self._data[TAU_SYN_E],
                                                    machine_time_step)

        i_decay, _ = get_exponential_decay_and_init(self._data[TAU_SYN_I],
                                                    machine_time_step)

        # pre-multiply constants (convert to millisecond)
        dt_divided_by_tau_syn_E_sqr = self._data[TAU_SYN_E].apply_operation(
            lambda x: (float(machine_time_step) / 1000.0) / (x * x))
        dt_divided_by_tau_syn_I_sqr = self._data[TAU_SYN_I].apply_operation(
            lambda x: (float(machine_time_step) / 1000.0) / (x * x))

        return [
            # linear term buffer
            NeuronParameter(self._data[EXC_RESPONSE],
                            _COMB_EXP_TYPES.RESPONSE_EXC.data_type),
            # exponential term buffer
            NeuronParameter(self._data[EXC_EXP_RESPONSE],
                            _COMB_EXP_TYPES.RESPONSE_EXC_EXP.data_type),
            # evolution parameters
            NeuronParameter(dt_divided_by_tau_syn_E_sqr,
                            _COMB_EXP_TYPES.CONST_EXC.data_type),
            NeuronParameter(e_decay, _COMB_EXP_TYPES.DECAY_EXC.data_type),
            NeuronParameter(self._data[INH_RESPONSE],
                            _COMB_EXP_TYPES.RESPONSE_INH.data_type),
            NeuronParameter(self._data[INH_EXP_RESPONSE],
                            _COMB_EXP_TYPES.RESPONSE_INH_EXP.data_type),
            NeuronParameter(dt_divided_by_tau_syn_I_sqr,
                            _COMB_EXP_TYPES.CONST_INH.data_type),
            NeuronParameter(i_decay, _COMB_EXP_TYPES.DECAY_INH.data_type),
        ]

    @overrides(AbstractSynapseType.get_synapse_type_parameter_types)
    def get_synapse_type_parameter_types(self):
        # One data type per entry of get_synapse_type_parameters, in the
        # same order.  BUG FIX: the previous code iterated the DataType
        # enum itself, whose members have no ``data_type`` attribute and
        # would raise AttributeError; _COMB_EXP_TYPES is the enum that
        # carries the per-parameter data types used above.
        return [item.data_type for item in _COMB_EXP_TYPES]

    @overrides(AbstractSynapseType.get_n_cpu_cycles_per_neuron)
    def get_n_cpu_cycles_per_neuron(self):
        # a guess
        return 100
class AbstractPopulationVertex(
        ApplicationVertex, AbstractGeneratesDataSpecification,
        AbstractHasAssociatedBinary, AbstractContainsUnits,
        AbstractSpikeRecordable,  AbstractNeuronRecordable,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractProvidesIncomingPartitionConstraints,
        AbstractPopulationInitializable, AbstractPopulationSettable,
        AbstractChangableAfterRun,
        AbstractRewritesDataSpecification, AbstractReadParametersBeforeSet,
        AbstractAcceptsIncomingSynapses, ProvidesKeyToAtomMappingImpl):
    """ Underlying vertex model for Neural Populations.
    """
    __slots__ = [
        "_change_requires_mapping",
        "_change_requires_neuron_parameters_reload",
        "_incoming_spike_buffer_size",
        "_n_atoms",
        "_n_profile_samples",
        "_neuron_impl",
        "_neuron_recorder",
        "_parameters",
        "_pynn_model",
        "_state_variables",
        "_synapse_manager",
        "_time_between_requests",
        "_units",
        "_n_subvertices",
        "_n_data_specs"]

    BASIC_MALLOC_USAGE = 2

    # recording region IDs
    SPIKE_RECORDING_REGION = 0

    # the size of the runtime SDP port data region
    RUNTIME_SDP_PORT_SIZE = 4

    # 8 elements before the start of global parameters
    BYTES_TILL_START_OF_GLOBAL_PARAMETERS = 32

    # The Buffer traffic type
    TRAFFIC_IDENTIFIER = "BufferTraffic"

    _n_vertices = 0

    def __init__(
            self, n_neurons, label, constraints, max_atoms_per_core,
            spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size,
            neuron_impl, pynn_model):
        """ Create a population vertex.

        :param n_neurons: number of neurons (atoms) in the population
        :param label: label of the vertex
        :param constraints: constraints passed to the superclass
        :param max_atoms_per_core: maximum neurons per machine core
        :param spikes_per_second: expected spike rate, handed to the\
            synapse manager
        :param ring_buffer_sigma: ring buffer sigma, handed to the\
            synapse manager
        :param incoming_spike_buffer_size: size of the incoming spike\
            buffer, or None to read the default from the simulator config
        :param neuron_impl: the neuron model implementation
        :param pynn_model: the PyNN model object
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(
            label, constraints, max_atoms_per_core)

        self._n_atoms = n_neurons
        # counters updated as machine vertices / data specs are created
        self._n_subvertices = 0
        self._n_data_specs = 0

        # buffer data
        self._incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # fall back to the configured default when no size was given
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self._neuron_impl = neuron_impl
        self._pynn_model = pynn_model
        # the neuron implementation fills in these ranged dictionaries
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        self._neuron_impl.add_parameters(self._parameters)
        self._neuron_impl.add_state_variables(self._state_variables)

        # Set up for recording: spikes plus the model's own variables
        recordables = ["spikes"]
        recordables.extend(self._neuron_impl.get_recordable_variables())
        self._neuron_recorder = NeuronRecorder(recordables, n_neurons)

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            self._neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Set up for profiling
        self._n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")

    @property
    @overrides(ApplicationVertex.n_atoms)
    def n_atoms(self):
        """ The number of neurons in this population vertex. """
        return self._n_atoms

    @inject_items({
        "graph": "MemoryApplicationGraph",
        "machine_time_step": "MachineTimeStep"
    })
    @overrides(
        ApplicationVertex.get_resources_used_by_atoms,
        additional_arguments={
            "graph", "machine_time_step"
        }
    )
    def get_resources_used_by_atoms(
            self, vertex_slice, graph, machine_time_step):
        """ Work out the resources needed by a slice of this vertex. """
        # pylint: disable=arguments-differ
        # recording SDRAM grows with runtime; the rest is fixed-size
        variable_sdram = self._neuron_recorder.get_variable_sdram_usage(
            vertex_slice)
        fixed_sdram = ConstantSDRAM(
            self._get_sdram_usage_for_atoms(
                vertex_slice, graph, machine_time_step))

        # bundle SDRAM, DTCM and CPU requirements together
        return ResourceContainer(
            sdram=variable_sdram + fixed_sdram,
            dtcm=DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice)),
            cpu_cycles=CPUCyclesPerTickResource(
                self.get_cpu_usage_for_atoms(vertex_slice)))

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        """ Whether a change since the last run requires re-mapping. """
        return self._change_requires_mapping

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        """ Record that the vertex no longer has unmapped changes. """
        self._change_requires_mapping = False

    # CB: May be dead code
    def _get_buffered_sdram_per_timestep(self, vertex_slice):
        """ Per-timestep recording SDRAM: spikes first, then each\
            recordable variable in order.
        """
        per_timestep = [
            self._neuron_recorder.get_buffered_sdram_per_timestep(
                "spikes", vertex_slice)]
        per_timestep.extend(
            self._neuron_recorder.get_buffered_sdram_per_timestep(
                variable, vertex_slice)
            for variable in self._neuron_impl.get_recordable_variables())
        return per_timestep

    def _get_buffered_sdram(self, vertex_slice, n_machine_time_steps):
        """ Total recording SDRAM for a run: spikes first, then each\
            recordable variable in order.
        """
        totals = [
            self._neuron_recorder.get_buffered_sdram(
                "spikes", vertex_slice, n_machine_time_steps)]
        totals.extend(
            self._neuron_recorder.get_buffered_sdram(
                variable, vertex_slice, n_machine_time_steps)
            for variable in self._neuron_impl.get_recordable_variables())
        return totals

    @overrides(ApplicationVertex.create_machine_vertex)
    def create_machine_vertex(
            self, vertex_slice, resources_required, label=None,
            constraints=None):
        """ Build the machine vertex for one slice of this population. """
        # keep count of machine vertices; used to stagger spike send offsets
        self._n_subvertices += 1
        recorded_ids = self._neuron_recorder.recorded_region_ids
        return PopulationMachineVertex(
            resources_required, recorded_ids, label, constraints)

    def get_cpu_usage_for_atoms(self, vertex_slice):
        """ CPU cycles needed per timestep by a slice of this vertex. """
        n_atoms = vertex_slice.n_atoms
        cycles = _NEURON_BASE_N_CPU_CYCLES + _C_MAIN_BASE_N_CPU_CYCLES
        cycles += _NEURON_BASE_N_CPU_CYCLES_PER_NEURON * n_atoms
        cycles += self._neuron_recorder.get_n_cpu_cycles(n_atoms)
        cycles += self._neuron_impl.get_n_cpu_cycles(n_atoms)
        cycles += self._synapse_manager.get_n_cpu_cycles()
        return cycles

    def get_dtcm_usage_for_atoms(self, vertex_slice):
        """ DTCM bytes needed by a slice of this vertex. """
        contributions = [
            _NEURON_BASE_DTCM_USAGE_IN_BYTES,
            self._neuron_impl.get_dtcm_usage_in_bytes(vertex_slice.n_atoms),
            self._neuron_recorder.get_dtcm_usage_in_bytes(vertex_slice),
            self._synapse_manager.get_dtcm_usage_in_bytes()]
        return sum(contributions)

    def _get_sdram_usage_for_neuron_params(self, vertex_slice):
        """ Calculate the SDRAM usage for just the neuron parameters region.

        :param vertex_slice: the slice of atoms.
        :return: The SDRAM required for the neuron region
        """
        header_bytes = self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS
        recorder_bytes = self._neuron_recorder.get_sdram_usage_in_bytes(
            vertex_slice)
        neuron_bytes = self._neuron_impl.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms)
        return header_bytes + recorder_bytes + neuron_bytes

    def _get_sdram_usage_for_atoms(
            self, vertex_slice, graph, machine_time_step):
        """ Total fixed SDRAM needed by a slice of this vertex. """
        # one recording channel per recordable variable, plus spikes
        n_record = len(self._neuron_impl.get_recordable_variables()) + 1
        contributions = [
            common_constants.SYSTEM_BYTES_REQUIREMENT,
            self._get_sdram_usage_for_neuron_params(vertex_slice),
            recording_utilities.get_recording_header_size(n_record),
            recording_utilities.get_recording_data_constant_size(n_record),
            PopulationMachineVertex.get_provenance_data_size(
                PopulationMachineVertex.N_ADDITIONAL_PROVENANCE_DATA_ITEMS),
            self._synapse_manager.get_sdram_usage_in_bytes(
                vertex_slice, graph.get_edges_ending_at_vertex(self),
                machine_time_step),
            profile_utils.get_profile_region_size(self._n_profile_samples),
        ]
        return sum(contributions)

    def _reserve_memory_regions(self, spec, vertex_slice, vertex):
        """ Reserve all the DSG memory regions this vertex writes.

        :param spec: the data specification to reserve regions in
        :param vertex_slice: the slice of atoms for this machine vertex
        :param vertex: the machine vertex (for its provenance region)
        """
        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.SYSTEM.value,
            size=common_constants.SIMULATION_N_BYTES,
            label='System')

        self._reserve_neuron_params_data_region(spec, vertex_slice)

        # one recording channel per recordable variable, plus spikes
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.RECORDING.value,
            size=recording_utilities.get_recording_header_size(
                len(self._neuron_impl.get_recordable_variables()) + 1))

        profile_utils.reserve_profile_region(
            spec, constants.POPULATION_BASED_REGIONS.PROFILING.value,
            self._n_profile_samples)

        vertex.reserve_provenance_data_region(spec)

    def _reserve_neuron_params_data_region(self, spec, vertex_slice):
        """ Reserve the neuron parameter data region.

        :param spec: the spec to write the DSG region to
        :param vertex_slice: the slice of atoms from the application vertex
        :return: None
        """
        params_size = self._get_sdram_usage_for_neuron_params(vertex_slice)
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            size=params_size,
            label='NeuronParams')

    def _write_neuron_parameters(
            self, spec, key, vertex_slice, machine_time_step,
            time_scale_factor):
        """ Write the neuron parameters region for one machine vertex.

        The write order below must match the field order the on-chip
        binary reads from this region; do not reorder the writes.

        :param spec: the data specification to write to
        :param key: the routing key for spikes, or None if keyless
        :param vertex_slice: the slice of atoms for this machine vertex
        :param machine_time_step: the machine time step
        :param time_scale_factor: the time scale factor
        """
        # pylint: disable=too-many-arguments
        n_atoms = vertex_slice.n_atoms
        spec.comment("\nWriting Neuron Parameters for {} Neurons:\n".format(
            n_atoms))

        # Set the focus to the memory region 2 (neuron parameters):
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)

        # Write the random back off value
        # each data spec gets a different multiple of the per-core offset,
        # presumably to stagger spike transmission between cores
        max_offset = (
            machine_time_step * time_scale_factor) // _MAX_OFFSET_DENOMINATOR
        spec.write_value(
            int(math.ceil(max_offset / self._n_subvertices)) *
            self._n_data_specs)
        self._n_data_specs += 1

        # Write the number of microseconds between sending spikes
        time_between_spikes = (
            (machine_time_step * time_scale_factor) / (n_atoms * 2.0))
        spec.write_value(data=int(time_between_spikes))

        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)

        # Write the number of synapse types
        spec.write_value(data=self._neuron_impl.get_n_synapse_types())

        # Write the size of the incoming spike buffer
        spec.write_value(data=self._incoming_spike_buffer_size)

        # Write the number of variables that can be recorded
        spec.write_value(
            data=len(self._neuron_impl.get_recordable_variables()))

        # Write the recording data
        recording_data = self._neuron_recorder.get_data(vertex_slice)
        spec.write_array(recording_data)

        # Write the neuron parameters
        neuron_data = self._neuron_impl.get_data(
            self._parameters, self._state_variables, vertex_slice)
        spec.write_array(neuron_data)

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "graph_mapper": "MemoryGraphMapper",
        "routing_info": "MemoryRoutingInfos"})
    @overrides(
        AbstractRewritesDataSpecification.regenerate_data_specification,
        additional_arguments={
            "machine_time_step", "time_scale_factor", "graph_mapper",
            "routing_info"})
    def regenerate_data_specification(
            self, spec, placement, machine_time_step, time_scale_factor,
            graph_mapper, routing_info):
        """ Rewrite only the neuron parameters region, so changed\
            parameters reach the machine without a full re-generation.

        :param spec: the data specification to write to
        :param placement: the placement of the machine vertex
        :param machine_time_step: the machine time step
        :param time_scale_factor: the time scale factor
        :param graph_mapper: mapping from machine to application vertices
        :param routing_info: the routing information
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex_slice = graph_mapper.get_slice(placement.vertex)

        # reserve the neuron parameters data region
        self._reserve_neuron_params_data_region(
            spec, graph_mapper.get_slice(placement.vertex))

        # write the neuron params into the new DSG region
        self._write_neuron_parameters(
            key=routing_info.get_first_key_from_pre_vertex(
                placement.vertex, constants.SPIKE_PARTITION_ID),
            machine_time_step=machine_time_step, spec=spec,
            time_scale_factor=time_scale_factor,
            vertex_slice=vertex_slice)

        # close spec
        spec.end_specification()

    @overrides(AbstractRewritesDataSpecification
               .requires_memory_regions_to_be_reloaded)
    def requires_memory_regions_to_be_reloaded(self):
        """ Whether the neuron parameters need rewriting to the machine. """
        return self._change_requires_neuron_parameters_reload

    @overrides(AbstractRewritesDataSpecification.mark_regions_reloaded)
    def mark_regions_reloaded(self):
        """ Note that the pending parameter reload has been done. """
        self._change_requires_neuron_parameters_reload = False

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "graph_mapper": "MemoryGraphMapper",
        "application_graph": "MemoryApplicationGraph",
        "machine_graph": "MemoryMachineGraph",
        "routing_info": "MemoryRoutingInfos",
        "data_n_time_steps": "DataNTimeSteps",
        "placements": "MemoryPlacements"
    })
    @overrides(
        AbstractGeneratesDataSpecification.generate_data_specification,
        additional_arguments={
            "machine_time_step", "time_scale_factor", "graph_mapper",
            "application_graph", "machine_graph", "routing_info",
            "data_n_time_steps", "placements"
        })
    def generate_data_specification(
            self, spec, placement, machine_time_step, time_scale_factor,
            graph_mapper, application_graph, machine_graph, routing_info,
            data_n_time_steps, placements):
        """ Generate the full data specification for one machine vertex:\
            system, recording, neuron parameter, profiling and synapse\
            regions, in that order.

        :param spec: the data specification to write to
        :param placement: the placement of the machine vertex
        :param machine_time_step: the machine time step
        :param time_scale_factor: the time scale factor
        :param graph_mapper: mapping from machine to application vertices
        :param application_graph: the application graph
        :param machine_graph: the machine graph
        :param routing_info: the routing information
        :param data_n_time_steps: number of timesteps recording must cover
        :param placements: the placements of all machine vertices
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex = placement.vertex

        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self._neuron_impl.model_name))
        vertex_slice = graph_mapper.get_slice(vertex)

        # Reserve memory regions
        self._reserve_memory_regions(spec, vertex_slice, vertex)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key
        key = routing_info.get_first_key_from_pre_vertex(
            vertex, constants.SPIKE_PARTITION_ID)

        # Write the setup region
        spec.switch_write_focus(
            constants.POPULATION_BASED_REGIONS.SYSTEM.value)
        spec.write_array(simulation_utilities.get_simulation_header_array(
            self.get_binary_file_name(), machine_time_step,
            time_scale_factor))

        # Write the recording region
        spec.switch_write_focus(
            constants.POPULATION_BASED_REGIONS.RECORDING.value)
        spec.write_array(recording_utilities.get_recording_header_array(
            self._get_buffered_sdram(vertex_slice, data_n_time_steps)))

        # Write the neuron parameters
        self._write_neuron_parameters(
            spec, key, vertex_slice, machine_time_step, time_scale_factor)

        # write profile data
        profile_utils.write_profile_region_data(
            spec, constants.POPULATION_BASED_REGIONS.PROFILING.value,
            self._n_profile_samples)

        # Get the weight_scale value from the appropriate location
        weight_scale = self._neuron_impl.get_global_weight_scale()

        # allow the synaptic matrix to write its data spec-able data
        self._synapse_manager.write_data_spec(
            spec, self, vertex_slice, vertex, placement, machine_graph,
            application_graph, routing_info, graph_mapper,
            weight_scale, machine_time_step, placements)

        # End the writing of this specification:
        spec.end_specification()

    @overrides(AbstractHasAssociatedBinary.get_binary_file_name)
    def get_binary_file_name(self):
        """ The binary name, with the synapse manager's executable suffix\
            spliced in before the file extension.
        """
        title, extension = os.path.splitext(self._neuron_impl.binary_name)
        suffix = self._synapse_manager.vertex_executable_suffix
        return "".join((title, suffix, extension))

    @overrides(AbstractHasAssociatedBinary.get_binary_start_type)
    def get_binary_start_type(self):
        """ This binary runs under the simulation interface. """
        return ExecutableType.USES_SIMULATION_INTERFACE

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        """ Whether spikes are currently being recorded. """
        return self._neuron_recorder.is_recording("spikes")

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(
            self, new_state=True, sampling_interval=None, indexes=None):
        """ Turn spike recording on or off (delegates to set_recording). """
        self.set_recording("spikes", new_state, sampling_interval, indexes)

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(
            self, placements, graph_mapper, buffer_manager, machine_time_step):
        """ Read the recorded spikes back from the machine buffers. """
        return self._neuron_recorder.get_spikes(
            self.label, buffer_manager, self.SPIKE_RECORDING_REGION,
            placements, graph_mapper, self, machine_time_step)

    @overrides(AbstractNeuronRecordable.get_recordable_variables)
    def get_recordable_variables(self):
        """ The variables this vertex can record. """
        return self._neuron_recorder.get_recordable_variables()

    @overrides(AbstractNeuronRecordable.is_recording)
    def is_recording(self, variable):
        """ Whether the named variable is being recorded. """
        return self._neuron_recorder.is_recording(variable)

    @overrides(AbstractNeuronRecordable.set_recording)
    def set_recording(self, variable, new_state=True, sampling_interval=None,
                      indexes=None):
        """ Turn recording of a variable on or off. """
        # starting to record an unrecorded variable forces a re-mapping;
        # check before the recorder state is changed below
        self._change_requires_mapping = not self.is_recording(variable)
        self._neuron_recorder.set_recording(
            variable, new_state, sampling_interval, indexes)

    @overrides(AbstractNeuronRecordable.get_data)
    def get_data(self, variable, n_machine_time_steps, placements,
                 graph_mapper, buffer_manager, machine_time_step):
        """ Read back the recorded data of one variable. """
        # pylint: disable=too-many-arguments
        # region 0 holds spikes; other variables follow in recorder order
        if variable == "spikes":
            region = 0
        else:
            region = 1 + self._neuron_impl.get_recordable_variable_index(
                variable)
        return self._neuron_recorder.get_matrix_data(
            self.label, buffer_manager, region, placements, graph_mapper,
            self, variable, n_machine_time_steps)

    @overrides(AbstractNeuronRecordable.get_neuron_sampling_interval)
    def get_neuron_sampling_interval(self, variable):
        """ The sampling interval used when recording a variable. """
        return self._neuron_recorder.get_neuron_sampling_interval(variable)

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        """ The sampling interval used when recording spikes. """
        return self._neuron_recorder.get_neuron_sampling_interval("spikes")

    @overrides(AbstractPopulationInitializable.initialize)
    def initialize(self, variable, value):
        """ Set the initial value of a state variable, marking the neuron\
            parameters for reload.

        :raises KeyError: if the variable is not a state variable
        """
        state_variables = self._state_variables
        if variable not in state_variables:
            raise KeyError(
                "Vertex does not support initialisation of"
                " parameter {}".format(variable))
        state_variables.set_value(variable, value)
        self._change_requires_neuron_parameters_reload = True

    @property
    def initialize_parameters(self):
        """ The names of the values that may be initialised. """
        return self._pynn_model.default_initial_values.keys()

    def _get_parameter(self, variable):
        """ Map a user-facing variable name to the key actually held in\
            the state variables, accepting either the plain name or the\
            name with an ``_init`` suffix.

        :raises KeyError: if neither form is a known state variable
        """
        suffix = "_init"
        if variable.endswith(suffix):
            # e.g. called with "v_init": prefer "v_init", fall back to "v"
            candidates = (variable, variable[:-len(suffix)])
        else:
            # e.g. called with "v": prefer "v_init", fall back to "v"
            candidates = (variable + suffix, variable)
        for candidate in candidates:
            if candidate in self._state_variables:
                return candidate

        # parameter not found for this variable
        raise KeyError("No variable {} found in {}".format(
            variable, self._neuron_impl.model_name))

    @overrides(AbstractPopulationInitializable.get_initial_value)
    def get_initial_value(self, variable, selector=None):
        """ Read the initial value(s) of a state variable. """
        values = self._state_variables[self._get_parameter(variable)]
        if selector is None:
            return values
        return values.get_values(selector)

    @overrides(AbstractPopulationInitializable.set_initial_value)
    def set_initial_value(self, variable, value, selector=None):
        """ Write the initial value(s) of a state variable. """
        values = self._state_variables[self._get_parameter(variable)]
        values.set_value_by_selector(selector, value)

    @property
    def conductance_based(self):
        """ Whether the underlying neuron model is conductance based. """
        return self._neuron_impl.is_conductance_based

    @overrides(AbstractPopulationSettable.get_value)
    def get_value(self, key):
        """ Get a property of the overall model.
        """
        if key in self._parameters:
            return self._parameters[key]
        raise InvalidParameterType(
            "Population {} does not have parameter {}".format(
                self._neuron_impl.model_name, key))

    @overrides(AbstractPopulationSettable.set_value)
    def set_value(self, key, value):
        """ Set a property of the overall model.
        """
        if key not in self._parameters:
            raise InvalidParameterType(
                "Population {} does not have parameter {}".format(
                    self._neuron_impl.model_name, key))
        self._parameters.set_value(key, value)
        # changed parameters must be rewritten to the machine
        self._change_requires_neuron_parameters_reload = True

    @overrides(AbstractReadParametersBeforeSet.read_parameters_from_machine)
    def read_parameters_from_machine(
            self, transceiver, placement, vertex_slice):
        """ Read the current neuron parameters back from the machine into\
            the local parameter and state-variable dictionaries.

        :param transceiver: the transceiver used to read machine memory
        :param placement: the placement of the machine vertex to read
        :param vertex_slice: the slice of atoms for that machine vertex
        """
        # locate SDRAM address to where the neuron parameters are stored
        neuron_region_sdram_address = \
            helpful_functions.locate_memory_region_for_placement(
                placement,
                constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
                transceiver)

        # shift past the extra stuff before neuron parameters that we don't
        # need to read
        neuron_parameters_sdram_address = (
            neuron_region_sdram_address +
            self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS)

        # get size of neuron params
        size_of_region = self._get_sdram_usage_for_neuron_params(vertex_slice)
        size_of_region -= self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS

        # get data from the machine
        byte_array = transceiver.read_memory(
            placement.x, placement.y, neuron_parameters_sdram_address,
            size_of_region)

        # Skip the recorder globals as these are not change on machine
        # Just written out in case data is changed and written back
        offset = self._neuron_recorder.get_sdram_usage_in_bytes(
            vertex_slice)

        # update python neuron parameters with the data
        self._neuron_impl.read_data(
            byte_array, offset, vertex_slice, self._parameters,
            self._state_variables)

    @property
    def weight_scale(self):
        """ The global weight scale reported by the neuron model. """
        return self._neuron_impl.get_global_weight_scale()

    @property
    def ring_buffer_sigma(self):
        """ The ring buffer sigma value held by the synapse manager. """
        return self._synapse_manager.ring_buffer_sigma

    @ring_buffer_sigma.setter
    def ring_buffer_sigma(self, ring_buffer_sigma):
        self._synapse_manager.ring_buffer_sigma = ring_buffer_sigma

    @property
    def spikes_per_second(self):
        """ The expected spike rate held by the synapse manager. """
        return self._synapse_manager.spikes_per_second

    @spikes_per_second.setter
    def spikes_per_second(self, spikes_per_second):
        self._synapse_manager.spikes_per_second = spikes_per_second

    @property
    def synapse_dynamics(self):
        """ The synapse dynamics held by the synapse manager. """
        return self._synapse_manager.synapse_dynamics

    def set_synapse_dynamics(self, synapse_dynamics):
        """ Set the synapse dynamics on the synapse manager. """
        self._synapse_manager.synapse_dynamics = synapse_dynamics

    def add_pre_run_connection_holder(
            self, connection_holder, edge, synapse_info):
        """ Register a connection holder to be filled before running\
            (delegates to the synapse manager).
        """
        # pylint: disable=arguments-differ
        self._synapse_manager.add_pre_run_connection_holder(
            connection_holder, edge, synapse_info)

    @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine)
    def get_connections_from_machine(
            self, transceiver, placement, edge, graph_mapper, routing_infos,
            synapse_information, machine_time_step, using_extra_monitor_cores,
            placements=None, data_receiver=None,
            sender_extra_monitor_core_placement=None,
            extra_monitor_cores_for_router_timeout=None,
            handle_time_out_configuration=True, fixed_routes=None):
        """ Read synaptic connections back from the machine (delegates to\
            the synapse manager, passing all arguments through unchanged).
        """
        # pylint: disable=too-many-arguments
        return self._synapse_manager.get_connections_from_machine(
            transceiver, placement, edge, graph_mapper,
            routing_infos, synapse_information, machine_time_step,
            using_extra_monitor_cores, placements, data_receiver,
            sender_extra_monitor_core_placement,
            extra_monitor_cores_for_router_timeout,
            handle_time_out_configuration, fixed_routes)

    def clear_connection_cache(self):
        """ Drop any cached connection data held by the synapse manager. """
        self._synapse_manager.clear_connection_cache()

    def get_maximum_delay_supported_in_ms(self, machine_time_step):
        """ The maximum synaptic delay supported, in milliseconds. """
        return self._synapse_manager.get_maximum_delay_supported_in_ms(
            machine_time_step)

    @overrides(AbstractProvidesIncomingPartitionConstraints.
               get_incoming_partition_constraints)
    def get_incoming_partition_constraints(self, partition):
        """ Gets the constraints for partitions going into this vertex.

        Note the partition argument is not used; the synapse manager's
        constraints apply to all incoming partitions.

        :param partition: partition that goes into this vertex
        :return: list of constraints
        """
        return self._synapse_manager.get_incoming_partition_constraints()

    @overrides(AbstractProvidesOutgoingPartitionConstraints.
               get_outgoing_partition_constraints)
    def get_outgoing_partition_constraints(self, partition):
        """ Gets the constraints for partitions going out of this vertex.

        All outgoing partitions get a contiguous key range constraint.

        :param partition: the partition that leaves this vertex
        :return: list of constraints
        """
        return [ContiguousKeyRangeContraint()]

    @overrides(AbstractNeuronRecordable.clear_recording)
    def clear_recording(
            self, variable, buffer_manager, placements, graph_mapper):
        """ Wipe the recorded data for one variable.

        Spikes live in recording region 0; every other recordable
        variable sits at its implementation index offset by one.
        """
        if variable == "spikes":
            region = 0
        else:
            region = self._neuron_impl.get_recordable_variable_index(
                variable) + 1
        self._clear_recording_region(
            buffer_manager, placements, graph_mapper, region)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        """ Wipe the recorded spike data from the buffer manager. """
        self._clear_recording_region(
            buffer_manager, placements, graph_mapper,
            AbstractPopulationVertex.SPIKE_RECORDING_REGION)

    def _clear_recording_region(
            self, buffer_manager, placements, graph_mapper,
            recording_region_id):
        """ Clear a recorded data region from the buffer manager.

        :param buffer_manager: the buffer manager object
        :param placements: the placements object
        :param graph_mapper: the graph mapper object
        :param recording_region_id: the recorded region ID for clearing
        :rtype: None
        """
        # Clear the region on every machine vertex derived from this vertex
        for m_vertex in graph_mapper.get_machine_vertices(self):
            m_placement = placements.get_placement_of_vertex(m_vertex)
            buffer_manager.clear_recorded_data(
                m_placement.x, m_placement.y, m_placement.p,
                recording_region_id)

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        """ Get the units of a recordable variable or a parameter.

        :param variable: the recordable variable or parameter name
        :raises Exception: if the name is neither recordable nor a parameter
        """
        if self._neuron_impl.is_recordable(variable):
            return self._neuron_impl.get_recordable_units(variable)
        if variable in self._parameters:
            return self._neuron_impl.get_units(variable)
        raise Exception("Population {} does not have parameter {}".format(
            self._neuron_impl.model_name, variable))

    def describe(self):
        """ Get a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template context\
        will be returned.
        """
        parameters = {
            parameter_name: self.get_value(parameter_name)
            for parameter_name in self._pynn_model.default_parameters}

        context = {
            "name": self._neuron_impl.model_name,
            "default_parameters": self._pynn_model.default_parameters,
            # Bug fix: this key previously (and wrongly) repeated
            # default_parameters instead of the initial values
            "default_initial_values": self._pynn_model.default_initial_values,
            "parameters": parameters,
        }
        return context

    def get_synapse_id_by_target(self, target):
        """ Get the numeric synapse type ID for a named synapse target;\
            delegates to the neuron implementation.
        """
        return self._neuron_impl.get_synapse_id_by_target(target)

    def __str__(self):
        """ Human-readable summary: the label plus the atom count. """
        return "{label} with {count} atoms".format(
            label=self.label, count=self.n_atoms)

    def __repr__(self):
        """ Same text as ``__str__``. """
        return str(self)

    def gen_on_machine(self, vertex_slice):
        """ Whether the synaptic data for the given slice is to be\
            generated on the machine; delegates to the synapse manager.
        """
        return self._synapse_manager.gen_on_machine(vertex_slice)
    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """ Build the population vertex and read its buffering/profiling\
            configuration from the simulator config.

        NOTE(review): this looks like an older variant of the constructor
        (single-underscore attributes, explicit buffer settings) pasted
        alongside a newer one below — confirm which one is current.

        :param n_neurons: number of neurons in the population
        :param label: label of the vertex
        :param constraints: placement constraints
        :param max_atoms_per_core: maximum atoms on any machine vertex
        :param spikes_per_second: expected spike rate (for ring buffers)
        :param ring_buffer_sigma: ring buffer safety margin
        :param incoming_spike_buffer_size: size of the incoming spike\
            buffer, or None to read it from the config
        :param neuron_impl: the neuron implementation object
        :param pynn_model: the PyNN model object
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        self._n_atoms = n_neurons

        # buffer data
        self._incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # fall back to the configured default when no size was given
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self._neuron_impl = neuron_impl
        self._pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        # let the implementation populate its parameter/state dictionaries
        self._neuron_impl.add_parameters(self._parameters)
        self._neuron_impl.add_state_variables(self._state_variables)

        # Set up for recording
        recordables = ["spikes"]
        recordables.extend(self._neuron_impl.get_recordable_variables())
        self._neuron_recorder = NeuronRecorder(recordables, n_neurons)

        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")

        # If live buffering is enabled, set a maximum on the buffer sizes
        spike_buffer_max_size = 0
        variable_buffer_max_size = 0
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            spike_buffer_max_size = config.getint("Buffers",
                                                  "spike_buffer_size")
            variable_buffer_max_size = config.getint("Buffers",
                                                     "variable_buffer_size")

        # one entry for spikes, then one per recordable variable
        self._maximum_sdram_for_buffering = [spike_buffer_max_size]
        for _ in self._neuron_impl.get_recordable_variables():
            self._maximum_sdram_for_buffering.append(variable_buffer_max_size)

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            self._neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Set up for profiling
        self._n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")
class SynapseTypeDualExponential(AbstractSynapseType, AbstractContainsUnits):
    """ A synapse type with two excitatory exponentially-decaying synaptic\
        inputs and one inhibitory one.
    """
    __slots__ = ["_data", "_n_neurons", "_units"]

    def __init__(self, n_neurons, tau_syn_E, tau_syn_E2, tau_syn_I,
                 initial_input_exc, initial_input_exc2, initial_input_inh):
        """
        :param n_neurons: number of neurons
        :param tau_syn_E: decay time constant of the first excitatory input
        :param tau_syn_E2: decay time constant of the second excitatory input
        :param tau_syn_I: decay time constant of the inhibitory input
        :param initial_input_exc: initial value of the first excitatory input
        :param initial_input_exc2: initial value of the second excitatory\
            input
        :param initial_input_inh: initial value of the inhibitory input
        """
        # pylint: disable=too-many-arguments
        # NOTE(review): "mV" for the tau_syn_* entries looks suspicious —
        # time constants are normally "ms"; left unchanged, confirm upstream.
        self._units = {
            TAU_SYN_E: "mV",
            TAU_SYN_E2: "mV",
            TAU_SYN_I: 'mV',
            GSYN_EXC: "uS",
            GSYN_INH: "uS"
        }

        self._n_neurons = n_neurons
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        self._data[TAU_SYN_E] = tau_syn_E
        self._data[TAU_SYN_E2] = tau_syn_E2
        self._data[TAU_SYN_I] = tau_syn_I
        self._data[INITIAL_INPUT_EXC] = initial_input_exc
        self._data[INITIAL_INPUT_EXC2] = initial_input_exc2
        self._data[INITIAL_INPUT_INH] = initial_input_inh

    @property
    def tau_syn_E(self):
        return self._data[TAU_SYN_E]

    @tau_syn_E.setter
    def tau_syn_E(self, tau_syn_E):
        # Bug fix: this was passed as "_value=", which is not the keyword
        # accepted by set_value (every sibling setter uses "value=")
        self._data.set_value(key=TAU_SYN_E, value=tau_syn_E)

    @property
    def tau_syn_E2(self):
        return self._data[TAU_SYN_E2]

    @tau_syn_E2.setter
    def tau_syn_E2(self, tau_syn_E2):
        self._data.set_value(key=TAU_SYN_E2, value=tau_syn_E2)

    @property
    def tau_syn_I(self):
        return self._data[TAU_SYN_I]

    @tau_syn_I.setter
    def tau_syn_I(self, tau_syn_I):
        self._data.set_value(key=TAU_SYN_I, value=tau_syn_I)

    @property
    def isyn_exc(self):
        return self._data[INITIAL_INPUT_EXC]

    @isyn_exc.setter
    def isyn_exc(self, new_value):
        self._data.set_value(key=INITIAL_INPUT_EXC, value=new_value)

    @property
    def isyn_inh(self):
        return self._data[INITIAL_INPUT_INH]

    @isyn_inh.setter
    def isyn_inh(self, new_value):
        self._data.set_value(key=INITIAL_INPUT_INH, value=new_value)

    @property
    def isyn_exc2(self):
        return self._data[INITIAL_INPUT_EXC2]

    @isyn_exc2.setter
    def isyn_exc2(self, new_value):
        self._data.set_value(key=INITIAL_INPUT_EXC2, value=new_value)

    @overrides(AbstractSynapseType.get_n_synapse_types)
    def get_n_synapse_types(self):
        return 3

    @overrides(AbstractSynapseType.get_synapse_id_by_target)
    def get_synapse_id_by_target(self, target):
        """ Map a target name to its synapse index, or None if unknown. """
        if target == "excitatory":
            return 0
        elif target == "excitatory2":
            return 1
        elif target == "inhibitory":
            return 2
        return None

    @overrides(AbstractSynapseType.get_synapse_targets)
    def get_synapse_targets(self):
        return "excitatory", "excitatory2", "inhibitory"

    @overrides(AbstractSynapseType.get_n_synapse_type_parameters)
    def get_n_synapse_type_parameters(self):
        # decay/init pairs for the three synapses plus three initial inputs
        return 9

    @inject_items({"machine_time_step": "MachineTimeStep"})
    def get_synapse_type_parameters(self, machine_time_step):
        """ Build the 9 on-machine parameters: a (decay, init) pair per\
            synapse followed by the three initial input values.
        """
        # pylint: disable=arguments-differ
        e_decay, e_init = get_exponential_decay_and_init(
            self._data[TAU_SYN_E], machine_time_step)
        e_decay2, e_init2 = get_exponential_decay_and_init(
            self._data[TAU_SYN_E2], machine_time_step)
        i_decay, i_init = get_exponential_decay_and_init(
            self._data[TAU_SYN_I], machine_time_step)

        return [
            NeuronParameter(e_decay, _DUAL_EXP_TYPES.E_DECAY.data_type),
            NeuronParameter(e_init, _DUAL_EXP_TYPES.E_INIT.data_type),
            NeuronParameter(e_decay2, _DUAL_EXP_TYPES.E2_DECAY.data_type),
            NeuronParameter(e_init2, _DUAL_EXP_TYPES.E2_INIT.data_type),
            NeuronParameter(i_decay, _DUAL_EXP_TYPES.I_DECAY.data_type),
            NeuronParameter(i_init, _DUAL_EXP_TYPES.I_INIT.data_type),
            NeuronParameter(self._data[INITIAL_INPUT_EXC],
                            _DUAL_EXP_TYPES.INITIAL_EXC.data_type),
            NeuronParameter(self._data[INITIAL_INPUT_EXC2],
                            _DUAL_EXP_TYPES.INITIAL_EXC2.data_type),
            NeuronParameter(self._data[INITIAL_INPUT_INH],
                            _DUAL_EXP_TYPES.INITIAL_INH.data_type)
        ]

    @overrides(AbstractSynapseType.get_synapse_type_parameter_types)
    def get_synapse_type_parameter_types(self):
        return [item.data_type for item in _DUAL_EXP_TYPES]

    @overrides(AbstractSynapseType.get_n_cpu_cycles_per_neuron)
    def get_n_cpu_cycles_per_neuron(self):

        # A guess
        return 100

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        return self._units[variable]
# 예제 #24
# (scraper artifact separating pasted code samples; kept as a comment so
# the module remains importable — the bare tokens would raise at import)
class AbstractPopulationVertex(
        ApplicationVertex, AbstractGeneratesDataSpecification,
        AbstractHasAssociatedBinary, AbstractContainsUnits,
        AbstractSpikeRecordable, AbstractNeuronRecordable,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractProvidesIncomingPartitionConstraints,
        AbstractPopulationInitializable, AbstractPopulationSettable,
        AbstractChangableAfterRun, AbstractRewritesDataSpecification,
        AbstractReadParametersBeforeSet, AbstractAcceptsIncomingSynapses,
        ProvidesKeyToAtomMappingImpl, AbstractCanReset):
    """ Underlying vertex model for Neural Populations.
    """
    __slots__ = [
        "__change_requires_mapping",
        "__change_requires_neuron_parameters_reload",
        "__change_requires_data_generation",
        "__incoming_spike_buffer_size",
        "__n_atoms",
        "__n_profile_samples",
        "__neuron_impl",
        "__neuron_recorder",
        "_parameters",  # See AbstractPyNNModel
        "__pynn_model",
        "_state_variables",  # See AbstractPyNNModel
        "__synapse_manager",
        "__time_between_requests",
        "__units",
        "__n_subvertices",
        "__n_data_specs",
        "__initial_state_variables",
        "__has_reset_last",
        "__updated_state_variables"
    ]

    # presumably the number of basic mallocs per core — TODO confirm
    BASIC_MALLOC_USAGE = 2

    # recording region IDs
    SPIKE_RECORDING_REGION = 0

    # the size of the runtime SDP port data region
    RUNTIME_SDP_PORT_SIZE = 4

    # 8 elements before the start of global parameters
    BYTES_TILL_START_OF_GLOBAL_PARAMETERS = 32

    # The Buffer traffic type
    TRAFFIC_IDENTIFIER = "BufferTraffic"

    _n_vertices = 0

    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """ Build the population vertex.

        :param n_neurons: number of neurons in the population
        :param label: label of the vertex
        :param constraints: placement constraints
        :param max_atoms_per_core: maximum atoms on any machine vertex
        :param spikes_per_second: expected spike rate (for ring buffers)
        :param ring_buffer_sigma: ring buffer safety margin
        :param incoming_spike_buffer_size: size of the incoming spike\
            buffer, or None to read it from the config
        :param neuron_impl: the neuron implementation object
        :param pynn_model: the PyNN model object
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        self.__n_atoms = n_neurons
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # buffer data
        self.__incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # fall back to the configured default when no size was given
        if incoming_spike_buffer_size is None:
            self.__incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self.__neuron_impl = neuron_impl
        self.__pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        # let the implementation populate its parameter/state dictionaries
        self.__neuron_impl.add_parameters(self._parameters)
        self.__neuron_impl.add_state_variables(self._state_variables)
        self.__initial_state_variables = None
        self.__updated_state_variables = set()

        # Set up for recording
        recordables = ["spikes"]
        recordables.extend(self.__neuron_impl.get_recordable_variables())
        self.__neuron_recorder = NeuronRecorder(recordables, n_neurons)

        # Set up synapse handling
        self.__synapse_manager = SynapticManager(
            self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self.__change_requires_mapping = True
        self.__change_requires_neuron_parameters_reload = False
        self.__change_requires_data_generation = False
        self.__has_reset_last = True

        # Set up for profiling
        self.__n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")

    @property
    @overrides(ApplicationVertex.n_atoms)
    def n_atoms(self):
        """ The number of neurons in the population. """
        return self.__n_atoms

    @property
    def _neuron_recorder(self):  # for testing only
        """ Direct access to the neuron recorder; a testing hook only. """
        return self.__neuron_recorder

    @inject_items({
        "graph": "MemoryApplicationGraph",
        "machine_time_step": "MachineTimeStep"
    })
    @overrides(ApplicationVertex.get_resources_used_by_atoms,
               additional_arguments={"graph", "machine_time_step"})
    def get_resources_used_by_atoms(self, vertex_slice, graph,
                                    machine_time_step):
        # pylint: disable=arguments-differ
        """ Work out the resources needed by a slice of this vertex. """

        # SDRAM that grows with recording, plus the fixed per-slice SDRAM
        variable_sdram = self.__neuron_recorder.get_variable_sdram_usage(
            vertex_slice)
        fixed_sdram = ConstantSDRAM(
            self._get_sdram_usage_for_atoms(
                vertex_slice, graph, machine_time_step))

        # combine into a single resource container for this slice
        return ResourceContainer(
            sdram=variable_sdram + fixed_sdram,
            dtcm=DTCMResource(self.get_dtcm_usage_for_atoms(vertex_slice)),
            cpu_cycles=CPUCyclesPerTickResource(
                self.get_cpu_usage_for_atoms(vertex_slice)))

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        """ Whether changes since the last run require a new mapping. """
        return self.__change_requires_mapping

    @property
    @overrides(AbstractChangableAfterRun.requires_data_generation)
    def requires_data_generation(self):
        """ Whether changes since the last run require data regeneration. """
        return self.__change_requires_data_generation

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        """ Reset the change-tracking flags once they have been acted on. """
        self.__change_requires_mapping = False
        self.__change_requires_data_generation = False

    # CB: May be dead code
    def _get_buffered_sdram_per_timestep(self, vertex_slice):
        """ Per-timestep recording SDRAM sizes: spikes first, then each\
            recordable variable in implementation order.
        """
        recorder = self.__neuron_recorder
        return [recorder.get_buffered_sdram_per_timestep(
            "spikes", vertex_slice)] + [
            recorder.get_buffered_sdram_per_timestep(variable, vertex_slice)
            for variable in self.__neuron_impl.get_recordable_variables()]

    def _get_buffered_sdram(self, vertex_slice, n_machine_time_steps):
        """ Total recording SDRAM over a run: spikes first, then each\
            recordable variable in implementation order.
        """
        recorder = self.__neuron_recorder
        sizes = [recorder.get_buffered_sdram(
            "spikes", vertex_slice, n_machine_time_steps)]
        sizes.extend(
            recorder.get_buffered_sdram(
                variable, vertex_slice, n_machine_time_steps)
            for variable in self.__neuron_impl.get_recordable_variables())
        return sizes

    @overrides(ApplicationVertex.create_machine_vertex)
    def create_machine_vertex(self,
                              vertex_slice,
                              resources_required,
                              label=None,
                              constraints=None):
        """ Build the machine vertex for one slice of this population. """
        machine_vertex = PopulationMachineVertex(
            resources_required, self.__neuron_recorder.recorded_region_ids,
            label, constraints)
        # track how many machine vertices this vertex is split into; used
        # when spreading the send offsets in _write_neuron_parameters
        self.__n_subvertices += 1
        return machine_vertex

    def get_cpu_usage_for_atoms(self, vertex_slice):
        """ Estimate the CPU cycles needed per timestep by a slice. """
        n_atoms = vertex_slice.n_atoms
        cycles = _NEURON_BASE_N_CPU_CYCLES + _C_MAIN_BASE_N_CPU_CYCLES
        cycles += _NEURON_BASE_N_CPU_CYCLES_PER_NEURON * n_atoms
        cycles += self.__neuron_recorder.get_n_cpu_cycles(n_atoms)
        cycles += self.__neuron_impl.get_n_cpu_cycles(n_atoms)
        cycles += self.__synapse_manager.get_n_cpu_cycles()
        return cycles

    def get_dtcm_usage_for_atoms(self, vertex_slice):
        """ Estimate the DTCM bytes needed by a slice. """
        usage = _NEURON_BASE_DTCM_USAGE_IN_BYTES
        usage += self.__neuron_impl.get_dtcm_usage_in_bytes(
            vertex_slice.n_atoms)
        usage += self.__neuron_recorder.get_dtcm_usage_in_bytes(vertex_slice)
        usage += self.__synapse_manager.get_dtcm_usage_in_bytes()
        return usage

    def _get_sdram_usage_for_neuron_params(self, vertex_slice):
        """ Calculate the SDRAM usage for just the neuron parameters region.

        :param vertex_slice: the slice of atoms.
        :return: The SDRAM required for the neuron region
        """
        header_bytes = self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS
        recorder_bytes = self.__neuron_recorder.get_sdram_usage_in_bytes(
            vertex_slice)
        neuron_bytes = self.__neuron_impl.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms)
        return header_bytes + recorder_bytes + neuron_bytes

    def _get_sdram_usage_for_atoms(self, vertex_slice, graph,
                                   machine_time_step):
        """ Fixed SDRAM needed by a slice: system, neuron parameters,\
            recording headers, provenance, synapses and profiling.
        """
        # spikes plus one region per recordable variable
        n_record = len(self.__neuron_impl.get_recordable_variables()) + 1
        return (
            common_constants.SYSTEM_BYTES_REQUIREMENT +
            self._get_sdram_usage_for_neuron_params(vertex_slice) +
            recording_utilities.get_recording_header_size(n_record) +
            recording_utilities.get_recording_data_constant_size(n_record) +
            PopulationMachineVertex.get_provenance_data_size(
                PopulationMachineVertex.N_ADDITIONAL_PROVENANCE_DATA_ITEMS) +
            self.__synapse_manager.get_sdram_usage_in_bytes(
                vertex_slice, graph.get_edges_ending_at_vertex(self),
                machine_time_step) +
            profile_utils.get_profile_region_size(self.__n_profile_samples))

    def _reserve_memory_regions(self, spec, vertex_slice, vertex):
        """ Reserve every data region this vertex writes: system, neuron\
            parameters, recording headers, profiling and provenance.

        :param spec: the data specification to reserve regions in
        :param vertex_slice: the slice of atoms for this machine vertex
        :param vertex: the machine vertex (for its provenance region)
        """

        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.SYSTEM.value,
            size=common_constants.SIMULATION_N_BYTES,
            label='System')

        self._reserve_neuron_params_data_region(spec, vertex_slice)

        # one recording header entry for spikes plus one per variable
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.RECORDING.value,
            size=recording_utilities.get_recording_header_size(
                len(self.__neuron_impl.get_recordable_variables()) + 1))

        profile_utils.reserve_profile_region(
            spec, constants.POPULATION_BASED_REGIONS.PROFILING.value,
            self.__n_profile_samples)

        vertex.reserve_provenance_data_region(spec)

    def _reserve_neuron_params_data_region(self, spec, vertex_slice):
        """ Reserve the neuron parameter data region.

        :param spec: the spec to write the DSG region to
        :param vertex_slice: the slice of atoms from the application vertex
        :return: None
        """
        # size is computed from headers + recorder + implementation needs
        params_size = self._get_sdram_usage_for_neuron_params(vertex_slice)
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            size=params_size,
            label='NeuronParams')

    @staticmethod
    def __copy_ranged_dict(source, merge=None, merge_keys=None):
        """ Deep-copy a ranged dictionary, optionally taking the values of\
            selected keys from *merge* instead of *source*.

        :param source: the ranged dictionary to copy
        :param merge: dictionary supplying values for keys in merge_keys
        :param merge_keys: keys whose values come from merge; None means\
            everything is copied from source
        """
        target = SpynnakerRangeDictionary(len(source))
        for key in source.keys():
            # NOTE(review): len(source) is reused as the list size here —
            # presumably it equals the number of neurons; confirm.
            copy_list = SpynnakerRangedList(len(source))
            if merge_keys is None or key not in merge_keys:
                init_list = source.get_list(key)
            else:
                init_list = merge.get_list(key)
            # copy range-by-range to preserve the ranged structure; lists
            # (but not strings) are stored as per-neuron value lists
            for start, stop, value in init_list.iter_ranges():
                is_list = (hasattr(value, '__iter__')
                           and not isinstance(value, str))
                copy_list.set_value_by_slice(start, stop, value, is_list)
            target[key] = copy_list
        return target

    def _write_neuron_parameters(self, spec, key, vertex_slice,
                                 machine_time_step, time_scale_factor):
        """ Write the neuron parameters region for one slice.

        The write order here is the on-machine layout contract: back-off,
        inter-spike gap, key flag + key, atom count, synapse type count,
        spike buffer size, recordable count, recording data, then the
        neuron data itself.

        :param spec: the data specification to write to
        :param key: the routing key for this slice, or None if keyless
        :param vertex_slice: the slice of atoms being written
        :param machine_time_step: the machine time step
        :param time_scale_factor: the time scale factor
        """

        # If resetting, reset any state variables that need to be reset
        if (self.__has_reset_last
                and self.__initial_state_variables is not None):
            self._state_variables = self.__copy_ranged_dict(
                self.__initial_state_variables, self._state_variables,
                self.__updated_state_variables)
            self.__initial_state_variables = None

        # If no initial state variables, copy them now
        if self.__has_reset_last:
            self.__initial_state_variables = self.__copy_ranged_dict(
                self._state_variables)

        # Reset things that need resetting
        self.__has_reset_last = False
        self.__updated_state_variables.clear()

        # pylint: disable=too-many-arguments
        n_atoms = vertex_slice.n_atoms
        spec.comment(
            "\nWriting Neuron Parameters for {} Neurons:\n".format(n_atoms))

        # Set the focus to the memory region 2 (neuron parameters):
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)

        # Write the random back off value
        max_offset = (machine_time_step *
                      time_scale_factor) // _MAX_OFFSET_DENOMINATOR
        spec.write_value(
            int(math.ceil(max_offset / self.__n_subvertices)) *
            self.__n_data_specs)
        self.__n_data_specs += 1

        # Write the number of microseconds between sending spikes
        time_between_spikes = ((machine_time_step * time_scale_factor) /
                               (n_atoms * 2.0))
        spec.write_value(data=int(time_between_spikes))

        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)

        # Write the number of synapse types
        spec.write_value(data=self.__neuron_impl.get_n_synapse_types())

        # Write the size of the incoming spike buffer
        spec.write_value(data=self.__incoming_spike_buffer_size)

        # Write the number of variables that can be recorded
        spec.write_value(
            data=len(self.__neuron_impl.get_recordable_variables()))

        # Write the recording data
        recording_data = self.__neuron_recorder.get_data(vertex_slice)
        spec.write_array(recording_data)

        # Write the neuron parameters
        neuron_data = self.__neuron_impl.get_data(self._parameters,
                                                  self._state_variables,
                                                  vertex_slice)
        spec.write_array(neuron_data)

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "graph_mapper": "MemoryGraphMapper",
        "routing_info": "MemoryRoutingInfos"
    })
    @overrides(AbstractRewritesDataSpecification.regenerate_data_specification,
               additional_arguments={
                   "machine_time_step", "time_scale_factor", "graph_mapper",
                   "routing_info"
               })
    def regenerate_data_specification(self, spec, placement, machine_time_step,
                                      time_scale_factor, graph_mapper,
                                      routing_info):
        """ Rewrite only the neuron parameters region for a placed vertex\
            (used after parameter changes between runs).
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex_slice = graph_mapper.get_slice(placement.vertex)

        # reserve the neuron parameters data region
        self._reserve_neuron_params_data_region(
            spec, graph_mapper.get_slice(placement.vertex))

        # write the neuron params into the new DSG region
        self._write_neuron_parameters(
            key=routing_info.get_first_key_from_pre_vertex(
                placement.vertex, constants.SPIKE_PARTITION_ID),
            machine_time_step=machine_time_step,
            spec=spec,
            time_scale_factor=time_scale_factor,
            vertex_slice=vertex_slice)

        # close spec
        spec.end_specification()

    @overrides(AbstractRewritesDataSpecification.
               requires_memory_regions_to_be_reloaded)
    def requires_memory_regions_to_be_reloaded(self):
        """ Whether the neuron parameters need rewriting to the machine. """
        return self.__change_requires_neuron_parameters_reload

    @overrides(AbstractRewritesDataSpecification.mark_regions_reloaded)
    def mark_regions_reloaded(self):
        """ Clear the reload flag once regions have been rewritten. """
        self.__change_requires_neuron_parameters_reload = False

    @inject_items({
        "machine_time_step": "MachineTimeStep",
        "time_scale_factor": "TimeScaleFactor",
        "graph_mapper": "MemoryGraphMapper",
        "application_graph": "MemoryApplicationGraph",
        "machine_graph": "MemoryMachineGraph",
        "routing_info": "MemoryRoutingInfos",
        "data_n_time_steps": "DataNTimeSteps"
    })
    @overrides(AbstractGeneratesDataSpecification.generate_data_specification,
               additional_arguments={
                   "machine_time_step", "time_scale_factor", "graph_mapper",
                   "application_graph", "machine_graph", "routing_info",
                   "data_n_time_steps"
               })
    def generate_data_specification(self, spec, placement, machine_time_step,
                                    time_scale_factor, graph_mapper,
                                    application_graph, machine_graph,
                                    routing_info, data_n_time_steps):
        """ Generate the full data specification for one placed machine\
            vertex: system, recording, neuron parameters, profiling and\
            synaptic data, in that order.
        """
        # pylint: disable=too-many-arguments, arguments-differ
        vertex = placement.vertex

        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self.__neuron_impl.model_name))
        vertex_slice = graph_mapper.get_slice(vertex)

        # Reserve memory regions
        self._reserve_memory_regions(spec, vertex_slice, vertex)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key
        key = routing_info.get_first_key_from_pre_vertex(
            vertex, constants.SPIKE_PARTITION_ID)

        # Write the setup region
        spec.switch_write_focus(
            constants.POPULATION_BASED_REGIONS.SYSTEM.value)
        spec.write_array(
            simulation_utilities.get_simulation_header_array(
                self.get_binary_file_name(), machine_time_step,
                time_scale_factor))

        # Write the recording region
        spec.switch_write_focus(
            constants.POPULATION_BASED_REGIONS.RECORDING.value)
        spec.write_array(
            recording_utilities.get_recording_header_array(
                self._get_buffered_sdram(vertex_slice, data_n_time_steps)))

        # Write the neuron parameters
        self._write_neuron_parameters(spec, key, vertex_slice,
                                      machine_time_step, time_scale_factor)

        # write profile data
        profile_utils.write_profile_region_data(
            spec, constants.POPULATION_BASED_REGIONS.PROFILING.value,
            self.__n_profile_samples)

        # Get the weight_scale value from the appropriate location
        weight_scale = self.__neuron_impl.get_global_weight_scale()

        # allow the synaptic matrix to write its data spec-able data
        self.__synapse_manager.write_data_spec(spec, self, vertex_slice,
                                               vertex, placement,
                                               machine_graph,
                                               application_graph, routing_info,
                                               graph_mapper, weight_scale,
                                               machine_time_step)

        # End the writing of this specification:
        spec.end_specification()

    @overrides(AbstractHasAssociatedBinary.get_binary_file_name)
    def get_binary_file_name(self):
        """ Binary name: implementation title, synapse suffix, extension. """
        title, extension = os.path.splitext(self.__neuron_impl.binary_name)
        suffix = self.__synapse_manager.vertex_executable_suffix
        return title + suffix + extension

    @overrides(AbstractHasAssociatedBinary.get_binary_start_type)
    def get_binary_start_type(self):
        """ The binary uses the simulation interface start type. """
        return ExecutableType.USES_SIMULATION_INTERFACE

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        """ Whether spikes are currently being recorded. """
        return self.__neuron_recorder.is_recording("spikes")

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(self,
                             new_state=True,
                             sampling_interval=None,
                             indexes=None):
        """ Turn spike recording on or off; delegates to set_recording. """
        self.set_recording("spikes", new_state, sampling_interval, indexes)

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(self, placements, graph_mapper, buffer_manager,
                   machine_time_step):
        """ Read back the recorded spikes from the spike recording region. """
        return self.__neuron_recorder.get_spikes(self.label, buffer_manager,
                                                 self.SPIKE_RECORDING_REGION,
                                                 placements, graph_mapper,
                                                 self, machine_time_step)

    @overrides(AbstractNeuronRecordable.get_recordable_variables)
    def get_recordable_variables(self):
        """ The names of all variables this vertex can record. """
        return self.__neuron_recorder.get_recordable_variables()

    @overrides(AbstractNeuronRecordable.is_recording)
    def is_recording(self, variable):
        """ Whether the given variable is currently being recorded. """
        return self.__neuron_recorder.is_recording(variable)

    @overrides(AbstractNeuronRecordable.set_recording)
    def set_recording(
            self, variable, new_state=True, sampling_interval=None,
            indexes=None):
        """ Enable or disable recording of a variable.

        Turning recording on for a variable that was not previously
        recorded requires a new mapping run.
        """
        self.__change_requires_mapping = not self.is_recording(variable)
        self.__neuron_recorder.set_recording(
            variable, new_state, sampling_interval, indexes)

    @overrides(AbstractNeuronRecordable.get_data)
    def get_data(self, variable, n_machine_time_steps, placements,
                 graph_mapper, buffer_manager, machine_time_step):
        # pylint: disable=too-many-arguments
        """ Read back recorded matrix data for a variable.

        Spikes are stored in recording region 0; every other variable is
        at the implementation's recordable-variable index offset by 1.
        """
        if variable == "spikes":
            index = 0
        else:
            index = 1 + self.__neuron_impl.get_recordable_variable_index(
                variable)
        return self.__neuron_recorder.get_matrix_data(
            self.label, buffer_manager, index, placements, graph_mapper,
            self, variable, n_machine_time_steps)

    @overrides(AbstractNeuronRecordable.get_neuron_sampling_interval)
    def get_neuron_sampling_interval(self, variable):
        """ Sampling interval used when recording the given variable. """
        return self.__neuron_recorder.get_neuron_sampling_interval(variable)

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        """ Sampling interval used when recording spikes. """
        return self.__neuron_recorder.get_neuron_sampling_interval("spikes")

    @overrides(AbstractPopulationInitializable.initialize)
    def initialize(self, variable, value):
        """ Set the initial value of a state variable.

        Only valid before the first run, or after a reset and before the
        next run; marks the neuron parameters for reload on the machine.

        :raises Exception: if called at any other time
        :raises KeyError: if the variable is not a known state variable
        """
        if not self.__has_reset_last:
            raise Exception(
                "initialize can only be called before the first call to run, "
                "or before the first call to run after a reset")
        if variable not in self._state_variables:
            raise KeyError("Vertex does not support initialisation of"
                           " parameter {}".format(variable))
        self._state_variables.set_value(variable, value)
        self.__updated_state_variables.add(variable)
        self.__change_requires_neuron_parameters_reload = True

    @property
    def initialize_parameters(self):
        """ The names of the variables that can be initialised. """
        return self.__pynn_model.default_initial_values.keys()

    def _get_parameter(self, variable):
        """ Map a variable name to the key actually present in the state
            variables, accepting either the bare name (e.g. "v") or the
            "_init" form (e.g. "v_init").

        :raises KeyError: if neither form is a known state variable
        """
        if variable.endswith("_init"):
            # Called with e.g. "v_init": prefer the exact key, but fall
            # back to the bare name if the model defines "v" instead
            stripped = variable[:-5]
            if variable in self._state_variables:
                return variable
            if stripped in self._state_variables:
                return stripped
        else:
            # Called with e.g. "v": prefer "v_init" if the model defines
            # it, otherwise accept the bare name
            if variable + "_init" in self._state_variables:
                return variable + "_init"
            if variable in self._state_variables:
                return variable

        # parameter not found for this variable
        raise KeyError("No variable {} found in {}".format(
            variable, self.__neuron_impl.model_name))

    @overrides(AbstractPopulationInitializable.get_initial_value)
    def get_initial_value(self, variable, selector=None):
        """ Get the initial value(s) of a state variable, optionally
            restricted to a selector of neurons.
        """
        values = self._state_variables[self._get_parameter(variable)]
        if selector is None:
            return values
        return values.get_values(selector)

    @overrides(AbstractPopulationInitializable.set_initial_value)
    def set_initial_value(self, variable, value, selector=None):
        """ Set the initial value of a state variable for the neurons
            chosen by the selector.
        """
        key = self._get_parameter(variable)
        self._state_variables[key].set_value_by_selector(selector, value)

    @property
    def conductance_based(self):
        """ Whether the underlying neuron implementation uses conductance
            inputs.
        """
        return self.__neuron_impl.is_conductance_based

    @overrides(AbstractPopulationSettable.get_value)
    def get_value(self, key):
        """ Get a property of the overall model.

        :raises InvalidParameterType: if the key is not a known parameter
        """
        if key in self._parameters:
            return self._parameters[key]
        raise InvalidParameterType(
            "Population {} does not have parameter {}".format(
                self.__neuron_impl.model_name, key))

    @overrides(AbstractPopulationSettable.set_value)
    def set_value(self, key, value):
        """ Set a property of the overall model and mark the neuron
            parameters for reload on the machine.

        :raises InvalidParameterType: if the key is not a known parameter
        """
        if key not in self._parameters:
            raise InvalidParameterType(
                "Population {} does not have parameter {}".format(
                    self.__neuron_impl.model_name, key))
        self._parameters.set_value(key, value)
        self.__change_requires_neuron_parameters_reload = True

    @overrides(AbstractReadParametersBeforeSet.read_parameters_from_machine)
    def read_parameters_from_machine(self, transceiver, placement,
                                     vertex_slice):
        """ Read the current neuron parameter values back from SDRAM on\
            the machine and update the Python-side parameter and state\
            variable structures.

        :param transceiver: the transceiver used to read machine memory
        :param placement: the placement of the vertex to read from
        :param vertex_slice: the slice of atoms held by that vertex
        """

        # locate SDRAM address to where the neuron parameters are stored
        neuron_region_sdram_address = \
            helpful_functions.locate_memory_region_for_placement(
                placement,
                constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
                transceiver)

        # shift past the extra stuff before neuron parameters that we don't
        # need to read
        neuron_parameters_sdram_address = (
            neuron_region_sdram_address +
            self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS)

        # get size of neuron params: the region size minus the skipped header
        size_of_region = self._get_sdram_usage_for_neuron_params(vertex_slice)
        size_of_region -= self.BYTES_TILL_START_OF_GLOBAL_PARAMETERS

        # get data from the machine
        byte_array = transceiver.read_memory(placement.x, placement.y,
                                             neuron_parameters_sdram_address,
                                             size_of_region)

        # Skip the recorder globals as these are not changed on the machine;
        # they are only written out in case data is changed and written back
        offset = self.__neuron_recorder.get_sdram_usage_in_bytes(vertex_slice)

        # update python neuron parameters with the data
        self.__neuron_impl.read_data(byte_array, offset, vertex_slice,
                                     self._parameters, self._state_variables)

    @property
    def weight_scale(self):
        """ The global weight scale of the neuron implementation. """
        return self.__neuron_impl.get_global_weight_scale()

    @property
    def ring_buffer_sigma(self):
        """ The ring buffer sigma, held by the synapse manager. """
        return self.__synapse_manager.ring_buffer_sigma

    @ring_buffer_sigma.setter
    def ring_buffer_sigma(self, new_value):
        self.__synapse_manager.ring_buffer_sigma = new_value

    @property
    def spikes_per_second(self):
        """ The expected spike rate, held by the synapse manager. """
        return self.__synapse_manager.spikes_per_second

    @spikes_per_second.setter
    def spikes_per_second(self, new_value):
        self.__synapse_manager.spikes_per_second = new_value

    @property
    def synapse_dynamics(self):
        """ The synapse dynamics, held by the synapse manager. """
        return self.__synapse_manager.synapse_dynamics

    def set_synapse_dynamics(self, synapse_dynamics):
        """ Set the synapse dynamics on the synapse manager. """
        self.__synapse_manager.synapse_dynamics = synapse_dynamics

    def add_pre_run_connection_holder(self, connection_holder, edge,
                                      synapse_info):
        # pylint: disable=arguments-differ
        """ Register a connection holder to be filled before the run;
            pure delegation to the synapse manager.
        """
        self.__synapse_manager.add_pre_run_connection_holder(
            connection_holder, edge, synapse_info)

    def get_connection_holders(self):
        """ The connection holders registered with the synapse manager. """
        return self.__synapse_manager.get_connection_holders()

    @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine)
    def get_connections_from_machine(
            self, transceiver, placement, edge, graph_mapper, routing_infos,
            synapse_information, machine_time_step, using_extra_monitor_cores,
            placements=None, monitor_api=None, monitor_placement=None,
            monitor_cores=None, handle_time_out_configuration=True,
            fixed_routes=None):
        # pylint: disable=too-many-arguments
        """ Read synaptic connection data back from the machine; pure
            delegation to the synapse manager.
        """
        return self.__synapse_manager.get_connections_from_machine(
            transceiver, placement, edge, graph_mapper, routing_infos,
            synapse_information, machine_time_step, using_extra_monitor_cores,
            placements, monitor_api, monitor_placement, monitor_cores,
            handle_time_out_configuration, fixed_routes)

    def clear_connection_cache(self):
        """ Drop any cached connection data held by the synapse manager. """
        self.__synapse_manager.clear_connection_cache()

    def get_maximum_delay_supported_in_ms(self, machine_time_step):
        """ The maximum synaptic delay supported, in milliseconds. """
        return self.__synapse_manager.get_maximum_delay_supported_in_ms(
            machine_time_step)

    @overrides(AbstractProvidesIncomingPartitionConstraints.
               get_incoming_partition_constraints)
    def get_incoming_partition_constraints(self, partition):
        """ Gets the constraints for partitions going into this vertex;
            delegated to the synapse manager.

        :param partition: partition that goes into this vertex
        :return: list of constraints
        """
        return self.__synapse_manager.get_incoming_partition_constraints()

    @overrides(AbstractProvidesOutgoingPartitionConstraints.
               get_outgoing_partition_constraints)
    def get_outgoing_partition_constraints(self, partition):
        """ Gets the constraints for partitions going out of this vertex:
            a single contiguous key range.

        :param partition: the partition that leaves this vertex
        :return: list of constraints
        """
        return [ContiguousKeyRangeContraint()]

    @overrides(AbstractNeuronRecordable.clear_recording)
    def clear_recording(self, variable, buffer_manager, placements,
                        graph_mapper):
        """ Clear recorded data for a variable: spikes live in region 0,
            every other variable at its implementation index offset by 1.
        """
        if variable == "spikes":
            index = 0
        else:
            index = 1 + self.__neuron_impl.get_recordable_variable_index(
                variable)
        self._clear_recording_region(
            buffer_manager, placements, graph_mapper, index)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        """ Clear the recorded spike data from the machine buffers.

        :param buffer_manager: the buffer manager object
        :param placements: the placements object
        :param graph_mapper: the graph mapper object
        """
        # Use self.SPIKE_RECORDING_REGION, consistent with get_spikes,
        # rather than naming the class explicitly
        self._clear_recording_region(
            buffer_manager, placements, graph_mapper,
            self.SPIKE_RECORDING_REGION)

    def _clear_recording_region(self, buffer_manager, placements, graph_mapper,
                                recording_region_id):
        """ Clear a recorded data region from the buffer manager, for
            every machine vertex of this application vertex.

        :param buffer_manager: the buffer manager object
        :param placements: the placements object
        :param graph_mapper: the graph mapper object
        :param recording_region_id: the recorded region ID for clearing
        :rtype: None
        """
        for vertex in graph_mapper.get_machine_vertices(self):
            placement = placements.get_placement_of_vertex(vertex)
            buffer_manager.clear_recorded_data(
                placement.x, placement.y, placement.p, recording_region_id)

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        """ The units of a recordable variable or a model parameter.

        :raises Exception: if the variable is neither recordable nor a
            known parameter
        """
        if self.__neuron_impl.is_recordable(variable):
            return self.__neuron_impl.get_recordable_units(variable)
        if variable in self._parameters:
            return self.__neuron_impl.get_units(variable)
        raise Exception("Population {} does not have parameter {}".format(
            self.__neuron_impl.model_name, variable))

    def describe(self):
        """ Get a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template context\
        will be returned.
        """
        parameters = dict()
        for parameter_name in self.__pynn_model.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": self.__neuron_impl.model_name,
            "default_parameters": self.__pynn_model.default_parameters,
            # Fixed: this previously (and wrongly) reported
            # default_parameters under the default_initial_values key
            "default_initial_values": self.__pynn_model.default_initial_values,
            "parameters": parameters,
        }
        return context

    def get_synapse_id_by_target(self, target):
        """ The synapse type ID for a named target, as defined by the
            neuron implementation.
        """
        return self.__neuron_impl.get_synapse_id_by_target(target)

    def __str__(self):
        """ Short summary: the vertex label and its atom count. """
        return "{} with {} atoms".format(self.label, self.n_atoms)

    def __repr__(self):
        """ Same as the informal string representation. """
        return self.__str__()

    def gen_on_machine(self, vertex_slice):
        """ Whether the synaptic data for this slice is generated on the
            machine; delegated to the synapse manager.
        """
        return self.__synapse_manager.gen_on_machine(vertex_slice)

    @overrides(AbstractCanReset.reset_to_first_timestep)
    def reset_to_first_timestep(self):
        """ Reset the vertex to timestep zero: note the reset so that
            initialize becomes legal again, and arrange for on-machine
            data to be refreshed before the next run.
        """
        # Mark that reset has been done, and reload state variables
        self.__has_reset_last = True
        self.__change_requires_neuron_parameters_reload = True

        # If synapses change during the run, regenerate the data instead
        # of just reloading the neuron parameters
        if self.__synapse_manager.synapse_dynamics.changes_during_run:
            self.__change_requires_data_generation = True
            self.__change_requires_neuron_parameters_reload = False
class NeuronModelIzh(AbstractNeuronModel, AbstractContainsUnits):
    """ Izhikevich neuron model: parameters a, b, c, d and a constant
        current offset, with membrane state v and recovery state u.
    """
    __slots__ = ["_data", "_n_neurons", "_units"]

    def __init__(self, n_neurons, a, b, c, d, v_init, u_init, i_offset):
        # pylint: disable=too-many-arguments
        self._units = {
            A: "ms", B: "ms", C: "mV", D: "mV/ms",
            V_INIT: "mV", U_INIT: "mV/ms", I_OFFSET: "nA"}

        self._n_neurons = n_neurons
        self._data = SpynnakerRangeDictionary(size=n_neurons)
        for key, value in (
                (A, a), (B, b), (C, c), (D, d), (V_INIT, v_init),
                (U_INIT, u_init), (I_OFFSET, i_offset)):
            self._data[key] = value

    @property
    def a(self):
        return self._data[A]

    @a.setter
    def a(self, new_value):
        self._data.set_value(key=A, value=new_value)

    @property
    def b(self):
        return self._data[B]

    @b.setter
    def b(self, new_value):
        self._data.set_value(key=B, value=new_value)

    @property
    def c(self):
        return self._data[C]

    @c.setter
    def c(self, new_value):
        self._data.set_value(key=C, value=new_value)

    @property
    def d(self):
        return self._data[D]

    @d.setter
    def d(self, new_value):
        self._data.set_value(key=D, value=new_value)

    @property
    def i_offset(self):
        return self._data[I_OFFSET]

    @i_offset.setter
    def i_offset(self, new_value):
        self._data.set_value(key=I_OFFSET, value=new_value)

    @property
    def v_init(self):
        return self._data[V_INIT]

    @v_init.setter
    def v_init(self, new_value):
        self._data.set_value(key=V_INIT, value=new_value)

    @property
    def u_init(self):
        return self._data[U_INIT]

    @u_init.setter
    def u_init(self, new_value):
        self._data.set_value(key=U_INIT, value=new_value)

    def initialize_v(self, v_init):
        """ Set the initial membrane voltage. """
        self._data.set_value(key=V_INIT, value=v_init)

    def initialize_u(self, u_init):
        """ Set the initial recovery variable. """
        self._data.set_value(key=U_INIT, value=u_init)

    @overrides(AbstractNeuronModel.get_n_neural_parameters)
    def get_n_neural_parameters(self):
        # a, b, c, d, v, u, i_offset plus the timestep correction this_h
        return 8

    @inject_items({"machine_time_step": "MachineTimeStep"})
    @overrides(AbstractNeuronModel.get_neural_parameters,
               additional_arguments={'machine_time_step'})
    def get_neural_parameters(self, machine_time_step):
        # pylint: disable=arguments-differ
        # Parameters in the order the C implementation expects:
        # a, b, c, d, v, u, I_offset
        params = [
            NeuronParameter(self._data[key], data_type)
            for key, data_type in (
                (A, _IZH_TYPES.A.data_type),
                (B, _IZH_TYPES.B.data_type),
                (C, _IZH_TYPES.C.data_type),
                (D, _IZH_TYPES.D.data_type),
                (V_INIT, _IZH_TYPES.V_INIT.data_type),
                (U_INIT, _IZH_TYPES.U_INIT.data_type),
                (I_OFFSET, _IZH_TYPES.I_OFFSET.data_type))]

        # this_h: the current timestep in ms - simple correction for
        # threshold
        params.append(NeuronParameter(
            machine_time_step / 1000.0, _IZH_TYPES.THIS_H.data_type))
        return params

    @overrides(AbstractNeuronModel.get_neural_parameter_types)
    def get_neural_parameter_types(self):
        return [item.data_type for item in _IZH_TYPES]

    @overrides(AbstractNeuronModel.get_n_global_parameters)
    def get_n_global_parameters(self):
        # just the timestep
        return 1

    @inject_items({"machine_time_step": "MachineTimeStep"})
    @overrides(AbstractNeuronModel.get_global_parameters,
               additional_arguments={'machine_time_step'})
    def get_global_parameters(self, machine_time_step):
        # pylint: disable=arguments-differ
        # the timestep in ms
        return [
            NeuronParameter(machine_time_step / 1000.0,
                            _IZH_GLOBAL_TYPES.TIMESTEP.data_type)
        ]

    @overrides(AbstractNeuronModel.get_global_parameter_types)
    def get_global_parameter_types(self):
        return [item.data_type for item in _IZH_GLOBAL_TYPES]

    @overrides(AbstractNeuronModel.set_neural_parameters)
    def set_neural_parameters(self, neural_parameters, vertex_slice):
        # Only the state variables (v at index 4, u at index 5) can have
        # changed on the machine; the other entries are fixed parameters
        atoms = vertex_slice.as_slice
        self._data[V_INIT][atoms] = neural_parameters[4]
        self._data[U_INIT][atoms] = neural_parameters[5]

    def get_n_cpu_cycles_per_neuron(self):
        # A bit of a guess
        return 150

    @overrides(AbstractContainsUnits.get_units)
    def get_units(self, variable):
        return self._units[variable]