class SpikeSourcePoisson(
        AbstractPartitionableVertex, AbstractDataSpecableVertex,
        AbstractSpikeRecordable, AbstractProvidesOutgoingEdgeConstraints,
        PopulationSettableChangeRequiresMapping,
        ReceiveBuffersToHostBasicImpl):
    """ A Poisson Spike source object
    """

    _POISSON_SPIKE_SOURCE_REGIONS = Enum(
        value="_POISSON_SPIKE_SOURCE_REGIONS",
        names=[('SYSTEM_REGION', 0),
               ('POISSON_PARAMS_REGION', 1),
               ('SPIKE_HISTORY_REGION', 2),
               ('BUFFERING_OUT_STATE', 3)])
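    # These values index the data specification memory regions reserved on
    # each core (see reserve_memory_regions below)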

    _N_POPULATION_RECORDING_REGIONS = 1
    _DEFAULT_MALLOCS_USED = 2

    # Technically, DTCM would allow ~2900 atoms per core, but the CPU limit
    # is timescale-dependent (~2900 is fine at a 10x slow-down, but not in
    # real time)
    _model_based_max_atoms_per_core = 500

    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
            duration=None, seed=None):
        AbstractPartitionableVertex.__init__(
            self, n_atoms=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=self._model_based_max_atoms_per_core)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        ReceiveBuffersToHostBasicImpl.__init__(self)
        AbstractProvidesOutgoingEdgeConstraints.__init__(self)
        PopulationSettableChangeRequiresMapping.__init__(self)

        # Store the parameters
        self._rate = rate
        self._start = start
        self._duration = duration
        self._seed = seed
        self._rng = numpy.random.RandomState(seed)

        # Prepare for recording, and to get spikes
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")

    @property
    def rate(self):
        return self._rate

    @rate.setter
    def rate(self, rate):
        self._rate = rate

    @property
    def start(self):
        return self._start

    @start.setter
    def start(self, start):
        self._start = start

    @property
    def duration(self):
        return self._duration

    @duration.setter
    def duration(self, duration):
        self._duration = duration

    @property
    def seed(self):
        return self._seed

    @seed.setter
    def seed(self, seed):
        self._seed = seed
        self._rng = numpy.random.RandomState(seed)

    @property
    def model_name(self):
        """ Return a string representing a label for this class.
        """
        return "SpikeSourcePoisson"

    @staticmethod
    def set_model_max_atoms_per_core(new_value):
        SpikeSourcePoisson._model_based_max_atoms_per_core = new_value

    @staticmethod
    def get_params_bytes(vertex_slice):
        """ Gets the size of the poisson parameters in bytes
        :param vertex_slice:
        """
        return (RANDOM_SEED_WORDS + PARAMS_BASE_WORDS +
                (((vertex_slice.hi_atom - vertex_slice.lo_atom) + 1) *
                 PARAMS_WORDS_PER_NEURON)) * 4

    def reserve_memory_regions(self, spec, setup_sz, poisson_params_sz,
                               spike_hist_buff_sz):
        """ Reserve memory regions for poisson source parameters and output\
            buffer.
        :param spec:
        :param setup_sz:
        :param poisson_params_sz:
        :param spike_hist_buff_sz:
        :return:
        """
        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=self._POISSON_SPIKE_SOURCE_REGIONS.SYSTEM_REGION.value,
            size=setup_sz, label='setup')
        spec.reserve_memory_region(
            region=self._POISSON_SPIKE_SOURCE_REGIONS
                       .POISSON_PARAMS_REGION.value,
            size=poisson_params_sz, label='PoissonParams')
        self.reserve_buffer_regions(
            spec, self._POISSON_SPIKE_SOURCE_REGIONS.BUFFERING_OUT_STATE.value,
            [self._POISSON_SPIKE_SOURCE_REGIONS.SPIKE_HISTORY_REGION.value],
            [spike_hist_buff_sz])

    def _write_setup_info(
            self, spec, spike_history_region_sz, ip_tags,
            buffer_size_before_receive):
        """ Write information used to control the simulation and gathering of\
            results.

        :param spec:
        :param spike_history_region_sz:
        :param ip_tags:
        :param buffer_size_before_receive:
        :return:
        """

        self._write_basic_setup_info(
            spec, self._POISSON_SPIKE_SOURCE_REGIONS.SYSTEM_REGION.value)
        self.write_recording_data(
            spec, ip_tags, [spike_history_region_sz],
            buffer_size_before_receive, self._time_between_requests)

    def _write_poisson_parameters(self, spec, key, num_neurons):
        """ Generate Neuron Parameter data for Poisson spike sources

        :param spec:
        :param key:
        :param num_neurons:
        :return:
        """
        spec.comment("\nWriting Neuron Parameters for {} poisson sources:\n"
                     .format(num_neurons))

        # Set the focus to the Poisson parameters memory region:
        spec.switch_write_focus(
            region=self._POISSON_SPIKE_SOURCE_REGIONS
                       .POISSON_PARAMS_REGION.value)

        # Write header info to the memory region:

        # Write key info for this core: a "has key" flag followed by the
        # key itself (both zero when no key has been allocated)
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the random seed (4 words), drawn from this vertex's RNG
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))

        # For each neuron, get the rate to work out if it is a slow
        # or fast source
        slow_sources = list()
        fast_sources = list()
        for i in range(0, num_neurons):

            # Get the parameter values for source i:
            rate_val = generate_parameter(self._rate, i)
            start_val = generate_parameter(self._start, i)
            end_val = None
            if self._duration is not None:
                end_val = generate_parameter(self._duration, i) + start_val

            # Decide whether it is a fast or slow source
            spikes_per_tick = \
                (float(rate_val) * (self._machine_time_step / 1000000.0))
            if spikes_per_tick <= SLOW_RATE_PER_TICK_CUTOFF:
                slow_sources.append([i, rate_val, start_val, end_val])
            else:
                fast_sources.append([i, spikes_per_tick, start_val, end_val])

        # Write the numbers of each type of source
        spec.write_value(data=len(slow_sources))
        spec.write_value(data=len(fast_sources))

        # Now write one struct for each slow source as follows
        #
        #   typedef struct slow_spike_source_t
        #   {
        #     uint32_t neuron_id;
        #     uint32_t start_ticks;
        #     uint32_t end_ticks;
        #
        #     accum mean_isi_ticks;
        #     accum time_to_spike_ticks;
        #   } slow_spike_source_t;
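        # mean_isi_ticks is the expected inter-spike gap in timer ticks; the
        # trailing UINT32 zero written below presumably initialises
        # time_to_spike_ticks on-chip (an assumption about the C binary)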
        for (neuron_id, rate_val, start_val, end_val) in slow_sources:
            if rate_val == 0:
                isi_val = 0
            else:
                isi_val = float(1000000.0 /
                                (rate_val * self._machine_time_step))
            start_scaled = int(start_val * 1000.0 / self._machine_time_step)
            end_scaled = 0xFFFFFFFF
            if end_val is not None:
                end_scaled = int(end_val * 1000.0 / self._machine_time_step)
            spec.write_value(data=neuron_id, data_type=DataType.UINT32)
            spec.write_value(data=start_scaled, data_type=DataType.UINT32)
            spec.write_value(data=end_scaled, data_type=DataType.UINT32)
            spec.write_value(data=isi_val, data_type=DataType.S1615)
            spec.write_value(data=0x0, data_type=DataType.UINT32)

        # Now write
        #   typedef struct fast_spike_source_t
        #   {
        #     uint32_t neuron_id;
        #     uint32_t start_ticks;
        #     uint32_t end_ticks;
        #
        #     unsigned long fract exp_minus_lambda;
        #   } fast_spike_source_t;
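        # exp(-lambda) is the Poisson probability of zero spikes in a tick;
        # the binary presumably uses it for inverse-transform sampling of the
        # per-tick spike count (an assumption about the C side)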
        for (neuron_id, spikes_per_tick, start_val, end_val) in fast_sources:
            if spikes_per_tick == 0:
                exp_minus_lambda = 0
            else:
                exp_minus_lambda = math.exp(-1.0 * spikes_per_tick)
            start_scaled = int(start_val * 1000.0 / self._machine_time_step)
            end_scaled = 0xFFFFFFFF
            if end_val is not None:
                end_scaled = int(end_val * 1000.0 / self._machine_time_step)
            spec.write_value(data=neuron_id, data_type=DataType.UINT32)
            spec.write_value(data=start_scaled, data_type=DataType.UINT32)
            spec.write_value(data=end_scaled, data_type=DataType.UINT32)
            spec.write_value(data=exp_minus_lambda, data_type=DataType.U032)

    # @implements AbstractSpikeRecordable.is_recording_spikes
    def is_recording_spikes(self):
        return self._spike_recorder.record

    # @implements AbstractSpikeRecordable.set_recording_spikes
    def set_recording_spikes(self):
        ip_address = config.get("Buffers", "receive_buffer_host")
        port = config.getint("Buffers", "receive_buffer_port")
        self.set_buffering_output(ip_address, port)
        self._spike_recorder.record = True

    # inherited from partitionable vertex
    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        poisson_params_sz = self.get_params_bytes(vertex_slice)
        spike_hist_buff_sz = min((
            self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps),
            self._spike_buffer_max_size))
        total_size = \
            ((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
             self.get_recording_data_size(1) +
             self.get_buffer_state_region_size(1) +
             poisson_params_sz + spike_hist_buff_sz)
        total_size += self._get_number_of_mallocs_used_by_dsg(
            vertex_slice, graph.incoming_edges_to_vertex(self)) * \
            front_end_common_constants.SARK_PER_MALLOC_SDRAM_USAGE
        return total_size

    def _get_number_of_mallocs_used_by_dsg(self, vertex_slice, in_edges):
        standard_mallocs = self._DEFAULT_MALLOCS_USED
        if self._spike_recorder.record:
            standard_mallocs += 1
        return standard_mallocs

    def get_dtcm_usage_for_atoms(self, vertex_slice, graph):
        return 0

    def get_cpu_usage_for_atoms(self, vertex_slice, graph):
        return 0

    def generate_data_spec(self, subvertex, placement, subgraph, graph,
                           routing_info, hostname, graph_mapper, report_folder,
                           ip_tags, reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        spike_hist_buff_sz = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        buffer_size_before_receive = self._buffer_size_before_receive
        if config.getboolean("Buffers", "enable_buffered_recording"):
            if spike_hist_buff_sz < self._spike_buffer_max_size:
                buffer_size_before_receive = spike_hist_buff_sz + 256
            else:
                spike_hist_buff_sz = self._spike_buffer_max_size
        else:
            buffer_size_before_receive = spike_hist_buff_sz + 256

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Basic setup plus 8 bytes for recording flags and recording size
        setup_sz = ((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                    self.get_recording_data_size(1))

        poisson_params_sz = self.get_params_bytes(vertex_slice)

        # Reserve SDRAM space for memory areas:
        self.reserve_memory_regions(
            spec, setup_sz, poisson_params_sz, spike_hist_buff_sz)

        self._write_setup_info(
            spec, spike_hist_buff_sz, ip_tags, buffer_size_before_receive)

        # Every subedge should have the same key
        key = None
        subedges = subgraph.outgoing_subedges_from_subvertex(subvertex)
        if len(subedges) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                subedges[0])
            key = keys_and_masks[0].key

        self._write_poisson_parameters(spec, key, vertex_slice.n_atoms)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return [data_writer.filename]

    def get_binary_file_name(self):
        return "spike_source_poisson.aplx"

    def get_spikes(self, placements, graph_mapper, buffer_manager):
        return self._spike_recorder.get_spikes(
            self._label, buffer_manager,
            self._POISSON_SPIKE_SOURCE_REGIONS.SPIKE_HISTORY_REGION.value,
            self._POISSON_SPIKE_SOURCE_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)

    def get_outgoing_edge_constraints(self, partitioned_edge, graph_mapper):
        return [KeyAllocatorContiguousRangeContraint()]

    def is_data_specable(self):
        return True
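

# A minimal standalone sketch (not sPyNNaker code) of the arithmetic used in
# get_params_bytes and the start/end tick scaling above.  The default word
# counts are assumptions chosen to match the structures written above; the
# real constants live in the sPyNNaker sources.
def example_params_bytes(lo_atom, hi_atom, seed_words=4, base_words=4,
                         words_per_neuron=5):
    """ Bytes for the Poisson parameter region of one vertex slice, assuming
        4 seed words, 4 base words (key flag, key and the two source counts)
        and 5 words per slow-source struct.
    """
    n_atoms = (hi_atom - lo_atom) + 1
    return (seed_words + base_words +
            (n_atoms * words_per_neuron)) * 4  # 4 bytes per 32-bit word


def example_ms_to_ticks(time_ms, machine_time_step_us=1000):
    """ Scale a millisecond time to timer ticks, as in start_scaled above.
    """
    return int(time_ms * 1000.0 / machine_time_step_us)


assert example_params_bytes(0, 99) == 2032  # a 100-atom slice
assert example_ms_to_ticks(100.0) == 100    # 100 ms at a 1 ms timestep

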
class SpikeSourcePoisson(
        AbstractPartitionableVertex,
        AbstractDataSpecableVertex, AbstractSpikeRecordable,
        AbstractProvidesOutgoingPartitionConstraints,
        PopulationSettableChangeRequiresMapping):
    """ A Poisson Spike source object
    """

    _POISSON_SPIKE_SOURCE_REGIONS = Enum(
        value="_POISSON_SPIKE_SOURCE_REGIONS",
        names=[('SYSTEM_REGION', 0),
               ('POISSON_PARAMS_REGION', 1),
               ('SPIKE_HISTORY_REGION', 2),
               ('BUFFERING_OUT_STATE', 3),
               ('PROVENANCE_REGION', 4)])

    _N_POPULATION_RECORDING_REGIONS = 1
    _DEFAULT_MALLOCS_USED = 2

    # Technically, DTCM would allow ~2900 atoms per core, but the CPU limit
    # is timescale-dependent (~2900 is fine at a 10x slow-down, but not in
    # real time)
    _model_based_max_atoms_per_core = 500

    # A count of the number of Poisson subvertices, used to work out the
    # random back-off range
    _n_poisson_subvertices = 0

    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
            duration=None, seed=None):
        AbstractPartitionableVertex.__init__(
            self, n_neurons, label, self._model_based_max_atoms_per_core,
            constraints)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        PopulationSettableChangeRequiresMapping.__init__(self)

        # Store the parameters
        self._rate = rate
        self._start = start
        self._duration = duration
        self._seed = seed
        self._rng = numpy.random.RandomState(seed)

        # Prepare for recording, and to get spikes
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")
        self._receive_buffer_host = config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = config.getint(
            "Buffers", "receive_buffer_port")
        self._minimum_buffer_sdram = config.getint(
            "Buffers", "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")

    def create_subvertex(
            self, vertex_slice, resources_required, label=None,
            constraints=None):
        SpikeSourcePoisson._n_poisson_subvertices += 1
        subvertex = SpikeSourcePoissonPartitionedVertex(
            resources_required, label, self._spike_recorder.record,
            constraints)
        if not self._using_auto_pause_and_resume:
            spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            spike_buffering_needed = recording_utils.needs_buffering(
                self._spike_buffer_max_size, spike_buffer_size,
                self._enable_buffered_recording)
            if spike_buffering_needed:
                subvertex.activate_buffering_output(
                    buffering_ip_address=self._receive_buffer_host,
                    buffering_port=self._receive_buffer_port)
        else:
            sdram_per_ts = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, 1)
            subvertex.activate_buffering_output(
                minimum_sdram_for_buffering=self._minimum_buffer_sdram,
                buffered_sdram_per_timestep=sdram_per_ts)

        return subvertex

    @property
    def rate(self):
        return self._rate

    @rate.setter
    def rate(self, rate):
        self._rate = rate

    @property
    def start(self):
        return self._start

    @start.setter
    def start(self, start):
        self._start = start

    @property
    def duration(self):
        return self._duration

    @duration.setter
    def duration(self, duration):
        self._duration = duration

    @property
    def seed(self):
        return self._seed

    @seed.setter
    def seed(self, seed):
        self._seed = seed
        self._rng = numpy.random.RandomState(seed)

    @property
    def model_name(self):
        """ Return a string representing a label for this class.
        """
        return "SpikeSourcePoisson"

    @staticmethod
    def set_model_max_atoms_per_core(new_value):
        SpikeSourcePoisson._model_based_max_atoms_per_core = new_value

    @staticmethod
    def get_params_bytes(vertex_slice):
        """ Gets the size of the poisson parameters in bytes
        :param vertex_slice:
        """
        return (RANDOM_SEED_WORDS + PARAMS_BASE_WORDS +
                (((vertex_slice.hi_atom - vertex_slice.lo_atom) + 1) *
                 PARAMS_WORDS_PER_NEURON)) * 4

    def reserve_memory_regions(self, spec, setup_sz, poisson_params_sz,
                               spike_hist_buff_sz, subvertex):
        """ Reserve memory regions for poisson source parameters and output\
            buffer.
        :param spec:
        :param setup_sz:
        :param poisson_params_sz:
        :param spike_hist_buff_sz:
        :param subvertex:
        :return:
        """
        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=(
                SpikeSourcePoissonPartitionedVertex.
                _POISSON_SPIKE_SOURCE_REGIONS.SYSTEM_REGION.value),
            size=setup_sz, label='setup')
        spec.reserve_memory_region(
            region=(
                SpikeSourcePoissonPartitionedVertex.
                _POISSON_SPIKE_SOURCE_REGIONS.POISSON_PARAMS_REGION.value),
            size=poisson_params_sz, label='PoissonParams')
        subvertex.reserve_buffer_regions(
            spec,
            (SpikeSourcePoissonPartitionedVertex.
                _POISSON_SPIKE_SOURCE_REGIONS.BUFFERING_OUT_STATE.value),
            [SpikeSourcePoissonPartitionedVertex.
                _POISSON_SPIKE_SOURCE_REGIONS.SPIKE_HISTORY_REGION.value],
            [spike_hist_buff_sz])
        subvertex.reserve_provenance_data_region(spec)

    def _write_setup_info(
            self, spec, spike_history_region_sz, ip_tags,
            buffer_size_before_receive, subvertex):
        """ Write information used to control the simulation and gathering of\
            results.

        :param spec:
        :param spike_history_region_sz:
        :param ip_tags:
        :param buffer_size_before_receive:
        :param subvertex:
        :return:
        """

        self._write_basic_setup_info(
            spec,
            (SpikeSourcePoissonPartitionedVertex.
                _POISSON_SPIKE_SOURCE_REGIONS.SYSTEM_REGION.value))
        subvertex.write_recording_data(
            spec, ip_tags, [spike_history_region_sz],
            buffer_size_before_receive, self._time_between_requests)

    def _write_poisson_parameters(self, spec, key, vertex_slice):
        """ Generate Neuron Parameter data for Poisson spike sources

        :param spec:
        :param key:
        :param vertex_slice:
        :return:
        """
        spec.comment("\nWriting Neuron Parameters for {} poisson sources:\n"
                     .format(vertex_slice.n_atoms))

        # Set the focus to the Poisson parameters memory region:
        spec.switch_write_focus(
            region=(
                SpikeSourcePoissonPartitionedVertex.
                _POISSON_SPIKE_SOURCE_REGIONS.POISSON_PARAMS_REGION.value))

        # Write header info to the memory region:

        # Write key info for this core: a "has key" flag followed by the
        # key itself (both zero when no key has been allocated)
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the random back off value
        spec.write_value(random.randint(
            0, SpikeSourcePoisson._n_poisson_subvertices))
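        # (presumably used on-chip to stagger when each core starts sending
        # spikes, spreading the network load - an assumption about the C side)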

        # Write the random seed (4 words), drawn from this vertex's RNG
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))

        # For each neuron, get the rate to work out if it is a slow
        # or fast source
        slow_sources = list()
        fast_sources = list()
        for i in range(vertex_slice.n_atoms):

            atom_id = vertex_slice.lo_atom + i

            # Get the parameter values for source i:
            rate_val = generate_parameter(self._rate, atom_id)
            start_val = generate_parameter(self._start, atom_id)
            end_val = None
            if self._duration is not None:
                end_val = generate_parameter(
                    self._duration, atom_id) + start_val

            # Decide whether it is a fast or slow source
            spikes_per_tick = \
                (float(rate_val) * (self._machine_time_step / 1000000.0))
            if spikes_per_tick <= SLOW_RATE_PER_TICK_CUTOFF:
                slow_sources.append([i, rate_val, start_val, end_val])
            else:
                fast_sources.append([i, spikes_per_tick, start_val, end_val])

        # Write the numbers of each type of source
        spec.write_value(data=len(slow_sources))
        spec.write_value(data=len(fast_sources))

        # Now write one struct for each slow source as follows
        #
        #   typedef struct slow_spike_source_t
        #   {
        #     uint32_t neuron_id;
        #     uint32_t start_ticks;
        #     uint32_t end_ticks;
        #
        #     accum mean_isi_ticks;
        #     accum time_to_spike_ticks;
        #   } slow_spike_source_t;
        for (neuron_id, rate_val, start_val, end_val) in slow_sources:
            if rate_val == 0:
                isi_val = 0
            else:
                isi_val = float(1000000.0 /
                                (rate_val * self._machine_time_step))
            start_scaled = int(start_val * 1000.0 / self._machine_time_step)
            end_scaled = 0xFFFFFFFF
            if end_val is not None:
                end_scaled = int(end_val * 1000.0 / self._machine_time_step)
            spec.write_value(data=neuron_id, data_type=DataType.UINT32)
            spec.write_value(data=start_scaled, data_type=DataType.UINT32)
            spec.write_value(data=end_scaled, data_type=DataType.UINT32)
            spec.write_value(data=isi_val, data_type=DataType.S1615)
            spec.write_value(data=0x0, data_type=DataType.UINT32)

        # Now write
        #   typedef struct fast_spike_source_t
        #   {
        #     uint32_t neuron_id;
        #     uint32_t start_ticks;
        #     uint32_t end_ticks;
        #
        #     unsigned long fract exp_minus_lambda;
        #   } fast_spike_source_t;
        for (neuron_id, spikes_per_tick, start_val, end_val) in fast_sources:
            if spikes_per_tick == 0:
                exp_minus_lambda = 0
            else:
                exp_minus_lambda = math.exp(-1.0 * spikes_per_tick)
            start_scaled = int(start_val * 1000.0 / self._machine_time_step)
            end_scaled = 0xFFFFFFFF
            if end_val is not None:
                end_scaled = int(end_val * 1000.0 / self._machine_time_step)
            spec.write_value(data=neuron_id, data_type=DataType.UINT32)
            spec.write_value(data=start_scaled, data_type=DataType.UINT32)
            spec.write_value(data=end_scaled, data_type=DataType.UINT32)
            spec.write_value(data=exp_minus_lambda, data_type=DataType.U032)

    # @implements AbstractSpikeRecordable.is_recording_spikes
    def is_recording_spikes(self):
        return self._spike_recorder.record

    # @implements AbstractSpikeRecordable.set_recording_spikes
    def set_recording_spikes(self):
        self._spike_recorder.record = True

    # inherited from partitionable vertex
    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        poisson_params_sz = self.get_params_bytes(vertex_slice)
        total_size = \
            ((front_end_common_constants.
              DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
             ReceiveBuffersToHostBasicImpl.get_recording_data_size(1) +
             ReceiveBuffersToHostBasicImpl.get_buffer_state_region_size(1) +
             SpikeSourcePoissonPartitionedVertex.get_provenance_data_size(0) +
             poisson_params_sz)
        total_size += self._get_number_of_mallocs_used_by_dsg(
            vertex_slice, graph.incoming_edges_to_vertex(self)) * \
            front_end_common_constants.SARK_PER_MALLOC_SDRAM_USAGE

        if self._using_auto_pause_and_resume:
            total_size += self._minimum_buffer_sdram
        else:
            spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            total_size += recording_utils.get_buffer_sizes(
                self._spike_buffer_max_size, spike_buffer_size,
                self._enable_buffered_recording)

        return total_size

    def _get_number_of_mallocs_used_by_dsg(self, vertex_slice, in_edges):
        standard_mallocs = self._DEFAULT_MALLOCS_USED
        if self._spike_recorder.record:
            standard_mallocs += 1
        return standard_mallocs

    def get_dtcm_usage_for_atoms(self, vertex_slice, graph):
        return 0

    def get_cpu_usage_for_atoms(self, vertex_slice, graph):
        return 0

    def generate_data_spec(
            self, subvertex, placement, partitioned_graph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        spike_history_sz = recording_utils.get_buffer_sizes(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        spike_buffering_needed = recording_utils.needs_buffering(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        buffer_size_before_receive = self._buffer_size_before_receive
        if not spike_buffering_needed:
            buffer_size_before_receive = spike_history_sz + 256

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Basic setup plus 8 bytes for recording flags and recording size
        setup_sz = ((front_end_common_constants.
                     DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                    subvertex.get_recording_data_size(1))

        poisson_params_sz = self.get_params_bytes(vertex_slice)

        # Reserve SDRAM space for memory areas:
        self.reserve_memory_regions(
            spec, setup_sz, poisson_params_sz, spike_history_sz, subvertex)

        self._write_setup_info(
            spec, spike_history_sz, ip_tags, buffer_size_before_receive,
            subvertex)

        # Every outgoing partition should share the same key
        key = None
        partitions = partitioned_graph.\
            outgoing_edges_partitions_from_vertex(subvertex)
        for partition in partitions.values():
            keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)
            key = keys_and_masks[0].key

        self._write_poisson_parameters(spec, key, vertex_slice)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename

    def get_binary_file_name(self):
        return "spike_source_poisson.aplx"

    def get_spikes(self, placements, graph_mapper, buffer_manager):
        return self._spike_recorder.get_spikes(
            self._label, buffer_manager,
            (SpikeSourcePoissonPartitionedVertex.
                _POISSON_SPIKE_SOURCE_REGIONS.SPIKE_HISTORY_REGION.value),
            (SpikeSourcePoissonPartitionedVertex.
                _POISSON_SPIKE_SOURCE_REGIONS.BUFFERING_OUT_STATE.value),
            placements, graph_mapper, self)

    def get_outgoing_partition_constraints(self, partition, graph_mapper):
        return [KeyAllocatorContiguousRangeContraint()]

    def is_data_specable(self):
        return True
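

# A minimal host-side sketch (not sPyNNaker code) of the two sampling regimes
# the slow/fast split above targets: fast sources draw a Poisson spike count
# every timer tick, while slow sources accumulate exponential inter-spike
# intervals.  The function names and defaults here are illustrative only.
import numpy


def example_fast_counts(rate_hz, machine_time_step_us, n_ticks, seed=None):
    """ Per-tick spike counts for a 'fast' source: Poisson(lambda) per tick.
    """
    rng = numpy.random.RandomState(seed)
    lam = float(rate_hz) * (machine_time_step_us / 1000000.0)
    return rng.poisson(lam, size=n_ticks)


def example_slow_times(rate_hz, duration_s, seed=None):
    """ Spike times for a 'slow' source: cumulative exponential ISIs.
    """
    rng = numpy.random.RandomState(seed)
    n_draws = int(rate_hz * duration_s * 2) + 10  # generous over-draw
    isis = rng.exponential(1.0 / rate_hz, size=n_draws)
    times = numpy.cumsum(isis)
    return times[times < duration_s]

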
class AbstractPopulationVertex(
        AbstractPartitionableVertex, AbstractDataSpecableVertex,
        AbstractSpikeRecordable, AbstractVRecordable, AbstractGSynRecordable,
        AbstractProvidesOutgoingEdgeConstraints,
        AbstractProvidesIncomingEdgeConstraints,
        AbstractPopulationInitializable, AbstractPopulationSettable,
        AbstractMappable, ReceiveBuffersToHostBasicImpl):
    """ Underlying vertex model for Neural Populations.
    """

    def __init__(
            self, n_neurons, binary, label, max_atoms_per_core,
            machine_time_step, timescale_factor, spikes_per_second,
            ring_buffer_sigma, model_name, neuron_model, input_type,
            synapse_type, threshold_type, additional_input=None,
            constraints=None):

        ReceiveBuffersToHostBasicImpl.__init__(self)
        AbstractPartitionableVertex.__init__(
            self, n_neurons, label, max_atoms_per_core, constraints)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step, timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractVRecordable.__init__(self)
        AbstractGSynRecordable.__init__(self)
        AbstractProvidesOutgoingEdgeConstraints.__init__(self)
        AbstractProvidesIncomingEdgeConstraints.__init__(self)
        AbstractPopulationInitializable.__init__(self)
        AbstractPopulationSettable.__init__(self)
        AbstractMappable.__init__(self)

        self._binary = binary
        self._label = label
        self._machine_time_step = machine_time_step
        self._timescale_factor = timescale_factor

        self._model_name = model_name
        self._neuron_model = neuron_model
        self._input_type = input_type
        self._threshold_type = threshold_type
        self._additional_input = additional_input

        # Set up for recording
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._v_recorder = VRecorder(machine_time_step)
        self._gsyn_recorder = GsynRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._v_buffer_max_size = config.getint(
            "Buffers", "v_buffer_size")
        self._gsyn_buffer_max_size = config.getint(
            "Buffers", "gsyn_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            synapse_type, machine_time_step, ring_buffer_sigma,
            spikes_per_second)

        # Get buffering information for later use
        self._receive_buffer_host = config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = config.getint(
            "Buffers", "receive_buffer_port")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")

        # Flag for whether a change has been made that requires re-mapping
        self._change_requires_mapping = True

    @property
    def requires_mapping(self):
        return self._change_requires_mapping

    def mark_no_changes(self):
        self._change_requires_mapping = False

    def create_subvertex(self, vertex_slice, resources_required, label=None,
                         constraints=None):
        return PopulationPartitionedVertex(
            self.buffering_output(), resources_required, label, constraints)

    # @implements AbstractPopulationVertex.get_cpu_usage_for_atoms
    def get_cpu_usage_for_atoms(self, vertex_slice, graph):
        per_neuron_cycles = (
            _NEURON_BASE_N_CPU_CYCLES_PER_NEURON +
            self._neuron_model.get_n_cpu_cycles_per_neuron() +
            self._input_type.get_n_cpu_cycles_per_neuron(
                self._synapse_manager.synapse_type.get_n_synapse_types()) +
            self._threshold_type.get_n_cpu_cycles_per_neuron())
        if self._additional_input is not None:
            per_neuron_cycles += \
                self._additional_input.get_n_cpu_cycles_per_neuron()
        return (_NEURON_BASE_N_CPU_CYCLES +
                _C_MAIN_BASE_N_CPU_CYCLES +
                (per_neuron_cycles * vertex_slice.n_atoms) +
                self._spike_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
                self._v_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
                self._gsyn_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
                self._synapse_manager.get_n_cpu_cycles(vertex_slice, graph))

    # @implements AbstractPopulationVertex.get_dtcm_usage_for_atoms
    def get_dtcm_usage_for_atoms(self, vertex_slice, graph):
        per_neuron_usage = (
            self._neuron_model.get_dtcm_usage_per_neuron_in_bytes() +
            self._input_type.get_dtcm_usage_per_neuron_in_bytes() +
            self._threshold_type.get_dtcm_usage_per_neuron_in_bytes())
        if self._additional_input is not None:
            per_neuron_usage += \
                self._additional_input.get_dtcm_usage_per_neuron_in_bytes()
        return (_NEURON_BASE_DTCM_USAGE_IN_BYTES +
                (per_neuron_usage * vertex_slice.n_atoms) +
                self._spike_recorder.get_dtcm_usage_in_bytes() +
                self._v_recorder.get_dtcm_usage_in_bytes() +
                self._gsyn_recorder.get_dtcm_usage_in_bytes() +
                self._synapse_manager.get_dtcm_usage_in_bytes(
                    vertex_slice, graph))

    def _get_sdram_usage_for_neuron_params(self, vertex_slice):
        per_neuron_usage = (
            self._input_type.get_sdram_usage_per_neuron_in_bytes() +
            self._threshold_type.get_sdram_usage_per_neuron_in_bytes())
        if self._additional_input is not None:
            per_neuron_usage += \
                self._additional_input.get_sdram_usage_per_neuron_in_bytes()
        return ((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                self.get_recording_data_size(3) +
                (per_neuron_usage * vertex_slice.n_atoms) +
                self._neuron_model.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms))

    # @implements AbstractPopulationVertex.get_sdram_usage_for_atoms
    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        return (self._get_sdram_usage_for_neuron_params(vertex_slice) +
                self.get_buffer_state_region_size(3) +
                min((self._spike_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps),
                    self._spike_buffer_max_size)) +
                min((self._v_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps),
                    self._v_buffer_max_size)) +
                min((self._gsyn_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps),
                    self._gsyn_buffer_max_size)) +
                self._synapse_manager.get_sdram_usage_in_bytes(
                    vertex_slice, graph.incoming_edges_to_vertex(self)) +
                (self._get_number_of_mallocs_used_by_dsg(
                    vertex_slice, graph.incoming_edges_to_vertex(self)) *
                 front_end_common_constants.SARK_PER_MALLOC_SDRAM_USAGE))

    # @implements AbstractPopulationVertex.model_name
    @property
    def model_name(self):
        return self._model_name

    def _get_number_of_mallocs_used_by_dsg(self, vertex_slice, in_edges):
        extra_mallocs = 0
        if self._gsyn_recorder.record_gsyn:
            extra_mallocs += 1
        if self._v_recorder.record_v:
            extra_mallocs += 1
        if self._spike_recorder.record:
            extra_mallocs += 1
        return (
            2 + self._synapse_manager.get_number_of_mallocs_used_by_dsg() +
            extra_mallocs)

    def _get_number_of_mallocs_from_basic_model(self):

        # one for system, one for neuron params
        return 2

    def _reserve_memory_regions(
            self, spec, vertex_slice, spike_history_region_sz,
            v_history_region_sz, gsyn_history_region_sz):

        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.SYSTEM.value,
            size=((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                  self.get_recording_data_size(3)), label='System')

        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            size=self._get_sdram_usage_for_neuron_params(vertex_slice),
            label='NeuronParams')

        self.reserve_buffer_regions(
            spec,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            [constants.POPULATION_BASED_REGIONS.SPIKE_HISTORY.value,
             constants.POPULATION_BASED_REGIONS.POTENTIAL_HISTORY.value,
             constants.POPULATION_BASED_REGIONS.GSYN_HISTORY.value],
            [spike_history_region_sz, v_history_region_sz,
             gsyn_history_region_sz])

    def _write_setup_info(
            self, spec, spike_history_region_sz, neuron_potential_region_sz,
            gsyn_region_sz, ip_tags, buffer_size_before_receive,
            time_between_requests):
        """ Write information used to control the simulation and gathering of\
            results.
        """

        # Write this to the system region (to be picked up by the simulation):
        self._write_basic_setup_info(
            spec, constants.POPULATION_BASED_REGIONS.SYSTEM.value)
        self.write_recording_data(
            spec, ip_tags,
            [spike_history_region_sz, neuron_potential_region_sz,
             gsyn_region_sz], buffer_size_before_receive,
            time_between_requests)

    def _write_neuron_parameters(
            self, spec, key, vertex_slice):

        n_atoms = (vertex_slice.hi_atom - vertex_slice.lo_atom) + 1
        spec.comment("\nWriting Neuron Parameters for {} Neurons:\n".format(
            n_atoms))

        # Set the focus to the neuron parameters memory region:
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)

        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)

        # Write the global parameters
        global_params = self._neuron_model.get_global_parameters()
        for param in global_params:
            spec.write_value(data=param.get_value(),
                             data_type=param.get_dataspec_datatype())

        # Write the neuron parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice, self._neuron_model.get_neural_parameters())

        # Write the input type parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice, self._input_type.get_input_type_parameters())

        # Write the additional input parameters
        if self._additional_input is not None:
            utility_calls.write_parameters_per_neuron(
                spec, vertex_slice, self._additional_input.get_parameters())

        # Write the threshold type parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice,
            self._threshold_type.get_threshold_parameters())

    def _get_recording_and_buffer_sizes(self, buffer_max, space_needed):
        if space_needed == 0:
            return 0, False
        if not self._enable_buffered_recording:
            return space_needed, False
        if buffer_max < space_needed:
            return buffer_max, True
        return space_needed, False

    # @implements AbstractDataSpecableVertex.generate_data_spec
    def generate_data_spec(
            self, subvertex, placement, subgraph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags,
            reverse_ip_tags, write_text_specs, application_run_time_folder):

        # Create new DataSpec for this processor:
        data_writer, report_writer = self.get_data_spec_file_writers(
            placement.x, placement.y, placement.p, hostname, report_folder,
            write_text_specs, application_run_time_folder)
        spec = DataSpecificationGenerator(data_writer, report_writer)
        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self.model_name))
        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        # Get recording sizes - the order is important here as spikes will
        # require less space than voltage and voltage less than gsyn.  This
        # order ensures that the buffer size before receive is optimum for
        # all recording channels
        # TODO: Maybe split the buffer size before receive by channel?
        spike_history_sz, spike_buffering_needed = \
            self._get_recording_and_buffer_sizes(
                self._spike_buffer_max_size,
                self._spike_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps))
        v_history_sz, v_buffering_needed = \
            self._get_recording_and_buffer_sizes(
                self._v_buffer_max_size,
                self._v_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps))
        gsyn_history_sz, gsyn_buffering_needed = \
            self._get_recording_and_buffer_sizes(
                self._gsyn_buffer_max_size,
                self._gsyn_recorder.get_sdram_usage_in_bytes(
                    vertex_slice.n_atoms, self._no_machine_time_steps))
        buffer_size_before_receive = self._buffer_size_before_receive
        if (not spike_buffering_needed and not v_buffering_needed and
                not gsyn_buffering_needed):
            buffer_size_before_receive = max((
                spike_history_sz, v_history_sz, gsyn_history_sz)) + 256

        # Reserve memory regions
        self._reserve_memory_regions(
            spec, vertex_slice, spike_history_sz, v_history_sz,
            gsyn_history_sz)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key - use only the first edge
        key = None
        if len(subgraph.outgoing_subedges_from_subvertex(subvertex)) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                subgraph.outgoing_subedges_from_subvertex(subvertex)[0])

            # NOTE: using the first key assigned as the key.  Should in future
            # get the list of keys and use one per neuron, to allow arbitrary
            # key and mask assignments
            key = keys_and_masks[0].key

        # Write the regions
        self._write_setup_info(
            spec, spike_history_sz, v_history_sz, gsyn_history_sz, ip_tags,
            buffer_size_before_receive, self._time_between_requests)
        self._write_neuron_parameters(spec, key, vertex_slice)

        # allow the synaptic matrix to write its data specable data
        self._synapse_manager.write_data_spec(
            spec, self, vertex_slice, subvertex, placement, subgraph, graph,
            routing_info, hostname, graph_mapper)

        # End the writing of this specification:
        spec.end_specification()
        data_writer.close()

        return [data_writer.filename]

    # @implements AbstractDataSpecableVertex.get_binary_file_name
    def get_binary_file_name(self):

        # Split binary name into title and extension
        binary_title, binary_extension = os.path.splitext(self._binary)

        # Reunite title and extension and return
        return (binary_title + self._synapse_manager.vertex_executable_suffix +
                binary_extension)

    # @implements AbstractSpikeRecordable.is_recording_spikes
    def is_recording_spikes(self):
        return self._spike_recorder.record

    # @implements AbstractSpikeRecordable.set_recording_spikes
    def set_recording_spikes(self):
        self._change_requires_mapping = not self._spike_recorder.record
        self.set_buffering_output(
            self._receive_buffer_host, self._receive_buffer_port)
        self._spike_recorder.record = True

    # @implements AbstractSpikeRecordable.get_spikes
    def get_spikes(self, placements, graph_mapper, buffer_manager):
        return self._spike_recorder.get_spikes(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.SPIKE_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)

    # @implements AbstractVRecordable.is_recording_v
    def is_recording_v(self):
        return self._v_recorder.record_v

    # @implements AbstractVRecordable.set_recording_v
    def set_recording_v(self):
        self.set_buffering_output(
            self._receive_buffer_host, self._receive_buffer_port)
        self._change_requires_mapping = not self._v_recorder.record_v
        self._v_recorder.record_v = True

    # @implements AbstractVRecordable.get_v
    def get_v(self, n_machine_time_steps, placements, graph_mapper,
              buffer_manager):
        return self._v_recorder.get_v(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.POTENTIAL_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)

    # @implements AbstractGSynRecordable.is_recording_gsyn
    def is_recording_gsyn(self):
        return self._gsyn_recorder.record_gsyn

    # @implements AbstractGSynRecordable.set_recording_gsyn
    def set_recording_gsyn(self):
        self.set_buffering_output(
            self._receive_buffer_host, self._receive_buffer_port)
        self._change_requires_mapping = not self._gsyn_recorder.record_gsyn
        self._gsyn_recorder.record_gsyn = True

    # @implements AbstractGSynRecordable.get_gsyn
    def get_gsyn(self, n_machine_time_steps, placements, graph_mapper,
                 buffer_manager):
        return self._gsyn_recorder.get_gsyn(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.GSYN_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)

    def initialize(self, variable, value):
        initialize_attr = getattr(
            self._neuron_model, "initialize_%s" % variable, None)
        if initialize_attr is None or not callable(initialize_attr):
            raise Exception("Vertex does not support initialisation of"
                            " parameter {}".format(variable))
        initialize_attr(value)
        self._change_requires_mapping = True

    @property
    def synapse_type(self):
        return self._synapse_manager.synapse_type

    @property
    def input_type(self):
        return self._input_type

    def get_value(self, key):
        """ Get a property of the overall model
        """
        for obj in [self._neuron_model, self._input_type,
                    self._threshold_type, self._synapse_manager.synapse_type,
                    self._additional_input]:
            if hasattr(obj, key):
                return getattr(obj, key)
        raise Exception("Population {} does not have parameter {}".format(
            self._label, key))

    def set_value(self, key, value):
        """ Set a property of the overall model
        """
        for obj in [self._neuron_model, self._input_type,
                    self._threshold_type, self._synapse_manager.synapse_type,
                    self._additional_input]:
            if hasattr(obj, key):
                setattr(obj, key, value)
                self._change_requires_mapping = True
                return
        raise Exception("Type {} does not have parameter {}".format(
            self._model_name, key))

    @property
    def weight_scale(self):
        return self._input_type.get_global_weight_scale()

    @property
    def ring_buffer_sigma(self):
        return self._synapse_manager.ring_buffer_sigma

    @ring_buffer_sigma.setter
    def ring_buffer_sigma(self, ring_buffer_sigma):
        self._synapse_manager.ring_buffer_sigma = ring_buffer_sigma

    @property
    def spikes_per_second(self):
        return self._synapse_manager.spikes_per_second

    @spikes_per_second.setter
    def spikes_per_second(self, spikes_per_second):
        self._synapse_manager.spikes_per_second = spikes_per_second

    def get_synaptic_list_from_machine(
            self, placements, transceiver, pre_subvertex, pre_n_atoms,
            post_subvertex, synapse_io, subgraph, routing_infos,
            weight_scales):
        return self._synapse_manager.get_synaptic_list_from_machine(
            placements, transceiver, pre_subvertex, pre_n_atoms,
            post_subvertex, synapse_io, subgraph, routing_infos, weight_scales)

    def is_data_specable(self):
        return True

    def get_incoming_edge_constraints(self, partitioned_edge, graph_mapper):
        """ Gets the constraints for edges going into this vertex

        :param partitioned_edge: partitioned edge that goes into this vertex
        :param graph_mapper: the graph mapper object
        :return: list of constraints
        """
        return self._synapse_manager.get_incoming_edge_constraints()

    def get_outgoing_edge_constraints(self, partitioned_edge, graph_mapper):
        """ Gets the constraints for edges going out of this vertex
        :param partitioned_edge: the partitioned edge that leaves this vertex
        :param graph_mapper: the graph mapper object
        :return: list of constraints
        """
        return [KeyAllocatorContiguousRangeContraint()]

    def __str__(self):
        return "{} with {} atoms".format(self._label, self.n_atoms)

    def __repr__(self):
        return self.__str__()
Example #9
class AbstractPopulationVertex(
        AbstractPartitionableVertex, AbstractDataSpecableVertex,
        AbstractSpikeRecordable, AbstractVRecordable, AbstractGSynRecordable,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractProvidesIncomingPartitionConstraints,
        AbstractPopulationInitializable, AbstractPopulationSettable,
        AbstractChangableAfterRun):
    """ Underlying vertex model for Neural Populations.
    """
    def __init__(self,
                 n_neurons,
                 binary,
                 label,
                 max_atoms_per_core,
                 machine_time_step,
                 timescale_factor,
                 spikes_per_second,
                 ring_buffer_sigma,
                 incoming_spike_buffer_size,
                 model_name,
                 neuron_model,
                 input_type,
                 synapse_type,
                 threshold_type,
                 additional_input=None,
                 constraints=None):

        AbstractPartitionableVertex.__init__(self, n_neurons, label,
                                             max_atoms_per_core, constraints)
        AbstractDataSpecableVertex.__init__(self, machine_time_step,
                                            timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractVRecordable.__init__(self)
        AbstractGSynRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        AbstractProvidesIncomingPartitionConstraints.__init__(self)
        AbstractPopulationInitializable.__init__(self)
        AbstractPopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)

        self._binary = binary
        self._label = label
        self._machine_time_step = machine_time_step
        self._timescale_factor = timescale_factor
        self._incoming_spike_buffer_size = incoming_spike_buffer_size
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self._model_name = model_name
        self._neuron_model = neuron_model
        self._input_type = input_type
        self._threshold_type = threshold_type
        self._additional_input = additional_input

        # Set up for recording
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._v_recorder = VRecorder(machine_time_step)
        self._gsyn_recorder = GsynRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint("Buffers",
                                                    "spike_buffer_size")
        self._v_buffer_max_size = config.getint("Buffers", "v_buffer_size")
        self._gsyn_buffer_max_size = config.getint("Buffers",
                                                   "gsyn_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = config.getint("Buffers",
                                                  "receive_buffer_port")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")

        # Set up synapse handling
        self._synapse_manager = SynapticManager(synapse_type,
                                                machine_time_step,
                                                ring_buffer_sigma,
                                                spikes_per_second)

        # flag indicating whether the state has changed
        self._change_requires_mapping = True

    @property
    def requires_mapping(self):
        return self._change_requires_mapping

    def mark_no_changes(self):
        self._change_requires_mapping = False

    def create_subvertex(self,
                         vertex_slice,
                         resources_required,
                         label=None,
                         constraints=None):

        is_recording = (self._gsyn_recorder.record_gsyn
                        or self._v_recorder.record_v
                        or self._spike_recorder.record)
        subvertex = PopulationPartitionedVertex(resources_required, label,
                                                is_recording, constraints)
        if not self._using_auto_pause_and_resume:
            spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            v_buffer_size = self._v_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            gsyn_buffer_size = self._gsyn_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            spike_buffering_needed = recording_utils.needs_buffering(
                self._spike_buffer_max_size, spike_buffer_size,
                self._enable_buffered_recording)
            v_buffering_needed = recording_utils.needs_buffering(
                self._v_buffer_max_size, v_buffer_size,
                self._enable_buffered_recording)
            gsyn_buffering_needed = recording_utils.needs_buffering(
                self._gsyn_buffer_max_size, gsyn_buffer_size,
                self._enable_buffered_recording)
            if (spike_buffering_needed or v_buffering_needed
                    or gsyn_buffering_needed):
                subvertex.activate_buffering_output(
                    buffering_ip_address=self._receive_buffer_host,
                    buffering_port=self._receive_buffer_port)
        else:
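            # with auto pause and resume, only a minimum buffer is reserved
            # up front; the run is presumably paused and the buffers drained
            # whenever they fill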
            sdram_per_ts = 0
            sdram_per_ts += self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, 1)
            sdram_per_ts += self._v_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, 1)
            sdram_per_ts += self._gsyn_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, 1)
            subvertex.activate_buffering_output(
                minimum_sdram_for_buffering=self._minimum_buffer_sdram,
                buffered_sdram_per_timestep=sdram_per_ts)
        return subvertex

    @property
    def maximum_delay_supported_in_ms(self):
        return self._synapse_manager.maximum_delay_supported_in_ms

    # @implements AbstractPartitionableVertex.get_cpu_usage_for_atoms
    def get_cpu_usage_for_atoms(self, vertex_slice, graph):
        per_neuron_cycles = (
            _NEURON_BASE_N_CPU_CYCLES_PER_NEURON +
            self._neuron_model.get_n_cpu_cycles_per_neuron() +
            self._input_type.get_n_cpu_cycles_per_neuron(
                self._synapse_manager.synapse_type.get_n_synapse_types()) +
            self._threshold_type.get_n_cpu_cycles_per_neuron())
        if self._additional_input is not None:
            per_neuron_cycles += \
                self._additional_input.get_n_cpu_cycles_per_neuron()
        return (_NEURON_BASE_N_CPU_CYCLES + _C_MAIN_BASE_N_CPU_CYCLES +
                (per_neuron_cycles * vertex_slice.n_atoms) +
                self._spike_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
                self._v_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
                self._gsyn_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
                self._synapse_manager.get_n_cpu_cycles(vertex_slice, graph))

    # @implements AbstractPartitionableVertex.get_dtcm_usage_for_atoms
    def get_dtcm_usage_for_atoms(self, vertex_slice, graph):
        per_neuron_usage = (
            self._neuron_model.get_dtcm_usage_per_neuron_in_bytes() +
            self._input_type.get_dtcm_usage_per_neuron_in_bytes() +
            self._threshold_type.get_dtcm_usage_per_neuron_in_bytes())
        if self._additional_input is not None:
            per_neuron_usage += \
                self._additional_input.get_dtcm_usage_per_neuron_in_bytes()
        return (
            _NEURON_BASE_DTCM_USAGE_IN_BYTES +
            (per_neuron_usage * vertex_slice.n_atoms) +
            self._spike_recorder.get_dtcm_usage_in_bytes() +
            self._v_recorder.get_dtcm_usage_in_bytes() +
            self._gsyn_recorder.get_dtcm_usage_in_bytes() +
            self._synapse_manager.get_dtcm_usage_in_bytes(vertex_slice, graph))

    def _get_sdram_usage_for_neuron_params(self, vertex_slice):
        per_neuron_usage = (
            self._input_type.get_sdram_usage_per_neuron_in_bytes() +
            self._threshold_type.get_sdram_usage_per_neuron_in_bytes())
        if self._additional_input is not None:
            per_neuron_usage += \
                self._additional_input.get_sdram_usage_per_neuron_in_bytes()
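        # the 3 passed below is the number of recorded channels:
        # spikes, v and gsyn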
        return (
            (common_constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
            ReceiveBuffersToHostBasicImpl.get_recording_data_size(3) +
            (per_neuron_usage * vertex_slice.n_atoms) +
            self._neuron_model.get_sdram_usage_in_bytes(vertex_slice.n_atoms))

    # @implements AbstractPartitionableVertex.get_sdram_usage_for_atoms
    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        sdram_requirement = (
            self._get_sdram_usage_for_neuron_params(vertex_slice) +
            ReceiveBuffersToHostBasicImpl.get_buffer_state_region_size(3) +
            PopulationPartitionedVertex.get_provenance_data_size(
                PopulationPartitionedVertex.N_ADDITIONAL_PROVENANCE_DATA_ITEMS)
            + self._synapse_manager.get_sdram_usage_in_bytes(
                vertex_slice, graph.incoming_edges_to_vertex(self)) +
            (self._get_number_of_mallocs_used_by_dsg(
                vertex_slice, graph.incoming_edges_to_vertex(self)) *
             common_constants.SARK_PER_MALLOC_SDRAM_USAGE))

        # add recording SDRAM if not automatically calculated
        if not self._using_auto_pause_and_resume:
            spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            v_buffer_size = self._v_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            gsyn_buffer_size = self._gsyn_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._spike_buffer_max_size, spike_buffer_size,
                self._enable_buffered_recording)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._v_buffer_max_size, v_buffer_size,
                self._enable_buffered_recording)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._gsyn_buffer_max_size, gsyn_buffer_size,
                self._enable_buffered_recording)
        else:
            sdram_requirement += self._minimum_buffer_sdram

        return sdram_requirement

    # @implements AbstractPopulationVertex.model_name
    @property
    def model_name(self):
        return self._model_name

    def _get_number_of_mallocs_used_by_dsg(self, vertex_slice, in_edges):
        extra_mallocs = 0
        if self._gsyn_recorder.record_gsyn:
            extra_mallocs += 1
        if self._v_recorder.record_v:
            extra_mallocs += 1
        if self._spike_recorder.record:
            extra_mallocs += 1
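        # the base 2 covers the system and neuron parameter regions (see
        # _get_number_of_mallocs_from_basic_model)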
        return (2 + self._synapse_manager.get_number_of_mallocs_used_by_dsg() +
                extra_mallocs)

    def _get_number_of_mallocs_from_basic_model(self):

        # one for system, one for neuron params
        return 2

    def _reserve_memory_regions(self, spec, vertex_slice,
                                spike_history_region_sz, v_history_region_sz,
                                gsyn_history_region_sz, subvertex):

        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.SYSTEM.value,
            size=(
                (common_constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                subvertex.get_recording_data_size(3)),
            label='System')

        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            size=self._get_sdram_usage_for_neuron_params(vertex_slice),
            label='NeuronParams')

        subvertex.reserve_buffer_regions(
            spec, constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            [
                constants.POPULATION_BASED_REGIONS.SPIKE_HISTORY.value,
                constants.POPULATION_BASED_REGIONS.POTENTIAL_HISTORY.value,
                constants.POPULATION_BASED_REGIONS.GSYN_HISTORY.value
            ], [
                spike_history_region_sz, v_history_region_sz,
                gsyn_history_region_sz
            ])

        subvertex.reserve_provenance_data_region(spec)

    def _write_setup_info(self, spec, spike_history_region_sz,
                          neuron_potential_region_sz, gsyn_region_sz, ip_tags,
                          buffer_size_before_receive, time_between_requests,
                          subvertex):
        """ Write information used to control the simulation and gathering of\
            results.
        """

        # Write this to the system region (to be picked up by the simulation):
        self._write_basic_setup_info(
            spec, constants.POPULATION_BASED_REGIONS.SYSTEM.value)
        subvertex.write_recording_data(spec, ip_tags, [
            spike_history_region_sz, neuron_potential_region_sz, gsyn_region_sz
        ], buffer_size_before_receive, time_between_requests)

    def _write_neuron_parameters(self, spec, key, vertex_slice):

        n_atoms = (vertex_slice.hi_atom - vertex_slice.lo_atom) + 1
        spec.comment(
            "\nWriting Neuron Parameters for {} Neurons:\n".format(n_atoms))

        # Set the focus to the memory region 2 (neuron parameters):
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)

        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)

        # Write the size of the incoming spike buffer
        spec.write_value(data=self._incoming_spike_buffer_size)

        # Write the global parameters
        global_params = self._neuron_model.get_global_parameters()
        for param in global_params:
            spec.write_value(data=param.get_value(),
                             data_type=param.get_dataspec_datatype())

        # Write the neuron parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice, self._neuron_model.get_neural_parameters())

        # Write the input type parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice, self._input_type.get_input_type_parameters())

        # Write the additional input parameters
        if self._additional_input is not None:
            utility_calls.write_parameters_per_neuron(
                spec, vertex_slice, self._additional_input.get_parameters())

        # Write the threshold type parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice,
            self._threshold_type.get_threshold_parameters())

    # @implements AbstractDataSpecableVertex.generate_data_spec
    def generate_data_spec(self, subvertex, placement, partitioned_graph,
                           graph, routing_info, hostname, graph_mapper,
                           report_folder, ip_tags, reverse_ip_tags,
                           write_text_specs, application_run_time_folder):

        # Create new DataSpec for this processor:
        data_writer, report_writer = self.get_data_spec_file_writers(
            placement.x, placement.y, placement.p, hostname, report_folder,
            write_text_specs, application_run_time_folder)
        spec = DataSpecificationGenerator(data_writer, report_writer)
        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self.model_name))
        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        # Get recording sizes - the order is important here as spikes will
        # require less space than voltage and voltage less than gsyn.  This
        # order ensures that the buffer size before receive is optimum for
        # all recording channels
        # TODO: Maybe split the buffer size before receive by channel?
        spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        v_buffer_size = self._v_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        gsyn_buffer_size = self._gsyn_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        spike_history_sz = recording_utils.get_buffer_sizes(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        v_history_sz = recording_utils.get_buffer_sizes(
            self._v_buffer_max_size, v_buffer_size,
            self._enable_buffered_recording)
        gsyn_history_sz = recording_utils.get_buffer_sizes(
            self._gsyn_buffer_max_size, gsyn_buffer_size,
            self._enable_buffered_recording)
        spike_buffering_needed = recording_utils.needs_buffering(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        v_buffering_needed = recording_utils.needs_buffering(
            self._v_buffer_max_size, v_buffer_size,
            self._enable_buffered_recording)
        gsyn_buffering_needed = recording_utils.needs_buffering(
            self._gsyn_buffer_max_size, gsyn_buffer_size,
            self._enable_buffered_recording)
        buffer_size_before_receive = self._buffer_size_before_receive
        if (not spike_buffering_needed and not v_buffering_needed
                and not gsyn_buffering_needed):
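            # nothing needs buffering, so raise the threshold above the
            # largest recording region, presumably so that no mid-run
            # host-side reads are triggered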
            buffer_size_before_receive = max(
                (spike_history_sz, v_history_sz, gsyn_history_sz)) + 256

        # Reserve memory regions
        self._reserve_memory_regions(spec, vertex_slice, spike_history_sz,
                                     v_history_sz, gsyn_history_sz, subvertex)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key - use one outgoing partition; all outgoing partitions
        # are assumed to share the same key
        key = None

        for partition in partitioned_graph.\
                outgoing_edges_partitions_from_vertex(subvertex).values():

            keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)

            # NOTE: using the first key assigned as the key.  Should in future
            # get the list of keys and use one per neuron, to allow arbitrary
            # key and mask assignments
            key = keys_and_masks[0].key

        # Write the regions
        self._write_setup_info(spec, spike_history_sz, v_history_sz,
                               gsyn_history_sz, ip_tags,
                               buffer_size_before_receive,
                               self._time_between_requests, subvertex)
        self._write_neuron_parameters(spec, key, vertex_slice)

        # allow the synaptic matrix to write its data-specable data
        self._synapse_manager.write_data_spec(spec, self, vertex_slice,
                                              subvertex, placement,
                                              partitioned_graph, graph,
                                              routing_info, graph_mapper,
                                              self._input_type)

        # End the writing of this specification:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename

    # @implements AbstractDataSpecableVertex.get_binary_file_name
    def get_binary_file_name(self):

        # Split binary name into title and extension
        binary_title, binary_extension = os.path.splitext(self._binary)

        # Reunite title and extension and return
        return (binary_title + self._synapse_manager.vertex_executable_suffix +
                binary_extension)

    # @implements AbstractSpikeRecordable.is_recording_spikes
    def is_recording_spikes(self):
        return self._spike_recorder.record

    # @implements AbstractSpikeRecordable.set_recording_spikes
    def set_recording_spikes(self):
        self._change_requires_mapping = not self._spike_recorder.record
        self._spike_recorder.record = True

    # @implements AbstractSpikeRecordable.get_spikes
    def get_spikes(self, placements, graph_mapper, buffer_manager):
        return self._spike_recorder.get_spikes(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.SPIKE_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)

    # @implements AbstractVRecordable.is_recording_v
    def is_recording_v(self):
        return self._v_recorder.record_v

    # @implements AbstractVRecordable.set_recording_v
    def set_recording_v(self):
        self._change_requires_mapping = not self._v_recorder.record_v
        self._v_recorder.record_v = True

    # @implements AbstractVRecordable.get_v
    def get_v(self, n_machine_time_steps, placements, graph_mapper,
              buffer_manager):
        return self._v_recorder.get_v(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.POTENTIAL_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)

    # @implements AbstractGSynRecordable.is_recording_gsyn
    def is_recording_gsyn(self):
        return self._gsyn_recorder.record_gsyn

    # @implements AbstractGSynRecordable.set_recording_gsyn
    def set_recording_gsyn(self):
        self._change_requires_mapping = not self._gsyn_recorder.record_gsyn
        self._gsyn_recorder.record_gsyn = True

    # @implements AbstractGSynRecordable.get_gsyn
    def get_gsyn(self, n_machine_time_steps, placements, graph_mapper,
                 buffer_manager):
        return self._gsyn_recorder.get_gsyn(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.GSYN_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)

    def initialize(self, variable, value):
        initialize_attr = getattr(self._neuron_model,
                                  "initialize_%s" % variable, None)
        if initialize_attr is None or not callable(initialize_attr):
            raise Exception("Vertex does not support initialisation of"
                            " parameter {}".format(variable))
        initialize_attr(value)
        self._change_requires_mapping = True

    @property
    def synapse_type(self):
        return self._synapse_manager.synapse_type

    @property
    def input_type(self):
        return self._input_type

    def get_value(self, key):
        """ Get a property of the overall model
        """
        for obj in [
                self._neuron_model, self._input_type, self._threshold_type,
                self._synapse_manager.synapse_type, self._additional_input
        ]:
            if hasattr(obj, key):
                return getattr(obj, key)
        raise Exception("Population {} does not have parameter {}".format(
            self._label, key))

    def set_value(self, key, value):
        """ Set a property of the overall model
        """
        for obj in [
                self._neuron_model, self._input_type, self._threshold_type,
                self._synapse_manager.synapse_type, self._additional_input
        ]:
            if hasattr(obj, key):
                setattr(obj, key, value)
                self._change_requires_mapping = True
                return
        raise Exception("Type {} does not have parameter {}".format(
            self._model_name, key))

    @property
    def weight_scale(self):
        return self._input_type.get_global_weight_scale()

    @property
    def ring_buffer_sigma(self):
        return self._synapse_manager.ring_buffer_sigma

    @ring_buffer_sigma.setter
    def ring_buffer_sigma(self, ring_buffer_sigma):
        self._synapse_manager.ring_buffer_sigma = ring_buffer_sigma

    @property
    def spikes_per_second(self):
        return self._synapse_manager.spikes_per_second

    @spikes_per_second.setter
    def spikes_per_second(self, spikes_per_second):
        self._synapse_manager.spikes_per_second = spikes_per_second

    @property
    def synapse_dynamics(self):
        return self._synapse_manager.synapse_dynamics

    @synapse_dynamics.setter
    def synapse_dynamics(self, synapse_dynamics):
        self._synapse_manager.synapse_dynamics = synapse_dynamics

    def add_pre_run_connection_holder(self, connection_holder, edge,
                                      synapse_info):
        self._synapse_manager.add_pre_run_connection_holder(
            connection_holder, edge, synapse_info)

    def get_connections_from_machine(self, transceiver, placement, subedge,
                                     graph_mapper, routing_infos, synapse_info,
                                     partitioned_graph):
        return self._synapse_manager.get_connections_from_machine(
            transceiver, placement, subedge, graph_mapper, routing_infos,
            synapse_info, partitioned_graph)

    def is_data_specable(self):
        return True

    def get_incoming_partition_constraints(self, partition, graph_mapper):
        """ Gets the constraints for partitions going into this vertex

        :param partition: partition that goes into this vertex
        :param graph_mapper: the graph mapper object
        :return: list of constraints
        """
        return self._synapse_manager.get_incoming_partition_constraints()

    def get_outgoing_partition_constraints(self, partition, graph_mapper):
        """ Gets the constraints for partitions going out of this vertex
        :param partition: the partition that leaves this vertex
        :param graph_mapper: the graph mapper object
        :return: list of constraints
        """
        return [KeyAllocatorContiguousRangeContraint()]

    def __str__(self):
        return "{} with {} atoms".format(self._label, self.n_atoms)

    def __repr__(self):
        return self.__str__()
class SpikeSourcePoisson(
        AbstractPartitionableVertex, AbstractDataSpecableVertex,
        AbstractSpikeRecordable, AbstractProvidesOutgoingEdgeConstraints):
    """
    This class represents a Poisson spike source object, which can represent
    a PyNN population of virtual neurons, each with its own parameters.
    """

    _POISSON_SPIKE_SOURCE_REGIONS = Enum(
        value="_POISSON_SPIKE_SOURCE_REGIONS",
        names=[('SYSTEM_REGION', 0),
               ('POISSON_PARAMS_REGION', 1),
               ('SPIKE_HISTORY_REGION', 2)])

    # Technically, this is ~2900 in terms of DTCM, but is timescale dependent
    # in terms of CPU (2900 at 10 times slowdown is fine, but not at realtime)
    _model_based_max_atoms_per_core = 500

    def __init__(self, n_neurons, machine_time_step, timescale_factor,
                 constraints=None, label="SpikeSourcePoisson",
                 rate=1.0, start=0.0, duration=None, seed=None):
        """
        Creates a new SpikeSourcePoisson object.
        """
        AbstractPartitionableVertex.__init__(
            self, n_atoms=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=self._model_based_max_atoms_per_core)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractSpikeRecordable.__init__(self)

        # Store the parameters
        self._rate = rate
        self._start = start
        self._duration = duration
        self._seed = seed
        self._rng = numpy.random.RandomState(seed)

        # Prepare for recording, and to get spikes
        self._spike_recorder = SpikeRecorder(machine_time_step)

        self._outgoing_edge_key_restrictor = \
            OutgoingEdgeSameContiguousKeysRestrictor()

    @property
    def rate(self):
        return self._rate

    @rate.setter
    def rate(self, rate):
        self._rate = rate

    @property
    def start(self):
        return self._start

    @start.setter
    def start(self, start):
        self._start = start

    @property
    def duration(self):
        return self._duration

    @duration.setter
    def duration(self, duration):
        self._duration = duration

    @property
    def seed(self):
        return self._seed

    @seed.setter
    def seed(self, seed):
        self._seed = seed
        self._rng = numpy.random.RandomState(seed)

    @property
    def model_name(self):
        """
        Return a string representing a label for this class.
        """
        return "SpikeSourcePoisson"

    @staticmethod
    def set_model_max_atoms_per_core(new_value):
        """
        Set the maximum number of atoms (neurons) placed on a single core.

        :param new_value: the new maximum number of atoms per core
        """
        SpikeSourcePoisson._model_based_max_atoms_per_core = new_value

    @staticmethod
    def get_params_bytes(vertex_slice):
        """
        Gets the size of the Poisson parameters in bytes: the seed words
        plus the base parameter words plus the per-neuron parameter words,
        at 4 bytes per word.
        :param vertex_slice: the slice of atoms to get the size for
        """
        return (RANDOM_SEED_WORDS + PARAMS_BASE_WORDS +
                (((vertex_slice.hi_atom - vertex_slice.lo_atom) + 1) *
                 PARAMS_WORDS_PER_NEURON)) * 4

    def reserve_memory_regions(self, spec, setup_sz, poisson_params_sz,
                               spike_hist_buff_sz):
        """
        Reserve memory regions for the Poisson source parameters and the
        output spike buffer.
        :param spec: the data specification to reserve regions in
        :param setup_sz: the size of the system region in bytes
        :param poisson_params_sz: the size of the parameters region in bytes
        :param spike_hist_buff_sz: the size of the spike history buffer
        """
        spec.comment("\nReserving memory space for data regions:\n\n")

        # Reserve memory:
        spec.reserve_memory_region(
            region=self._POISSON_SPIKE_SOURCE_REGIONS.SYSTEM_REGION.value,
            size=setup_sz, label='setup')
        spec.reserve_memory_region(
            region=self._POISSON_SPIKE_SOURCE_REGIONS
                       .POISSON_PARAMS_REGION.value,
            size=poisson_params_sz, label='PoissonParams')
        if spike_hist_buff_sz > 0:
            spec.reserve_memory_region(
                region=self._POISSON_SPIKE_SOURCE_REGIONS
                           .SPIKE_HISTORY_REGION.value,
                size=spike_hist_buff_sz, label='spikeHistBuffer',
                empty=True)

    def write_setup_info(self, spec, spike_history_region_sz):
        """
        Write information used to control the simulation and the gathering
        of results. Currently, this means the flag word used to signal
        whether information on neuron firing is stored locally in a buffer
        or passed out of the simulation for storage/display as the
        simulation proceeds.

        The format of the information is as follows:
        Word 0: Flags selecting data to be gathered during simulation.
            Bit 0: Record spike history

        :param spec: the data specification to write to
        :param spike_history_region_sz: the size of the spike history region
        """

        self._write_basic_setup_info(
            spec, self._POISSON_SPIKE_SOURCE_REGIONS.SYSTEM_REGION.value)
        recording_info = 0
        if self._spike_recorder.record:
            recording_info |= constants.RECORD_SPIKE_BIT
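        # NOTE: 0xBEEF0000 appears to act as a magic marker in the upper
        # half-word, letting the binary sanity-check the flags word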
        recording_info |= 0xBEEF0000

        # Write this to the system region (to be picked up by the simulation):
        spec.write_value(data=recording_info)
        spec.write_value(data=spike_history_region_sz)

    def write_poisson_parameters(self, spec, key, num_neurons):
        """
        Generate parameter data for the Poisson spike sources (region 2).
        :param spec: the data specification to write to
        :param key: the routing key for this vertex, or None if not used
        :param num_neurons: the number of neurons to write parameters for
        """
        spec.comment("\nWriting Neuron Parameters for {} poisson sources:\n"
                     .format(num_neurons))

        # Set the focus to the memory region 2 (neuron parameters):
        spec.switch_write_focus(
            region=self._POISSON_SPIKE_SOURCE_REGIONS
                       .POISSON_PARAMS_REGION.value)

        # Write header info to the memory region:

        # Write Key info for this core:
        if key is None:
            # if there's no key, two zero words cover the flag and the key
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            # there is a key, so set the has-key flag to 1, then write the key
            spec.write_value(data=1)
            spec.write_value(data=key)

        # Write the random seed (4 words), generated randomly!
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))
        spec.write_value(data=self._rng.randint(0x7FFFFFFF))

        # For each neuron, get the rate to work out if it is a slow
        # or fast source
        slow_sources = list()
        fast_sources = list()
        for i in range(0, num_neurons):

            # Get the parameter values for source i:
            rate_val = generate_parameter(self._rate, i)
            start_val = generate_parameter(self._start, i)
            end_val = None
            if self._duration is not None:
                end_val = generate_parameter(self._duration, i) + start_val

            # Decide if it is a fast or slow source
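            # machine_time_step is in microseconds, so dividing by 1e6
            # gives the tick length in seconds; rate (Hz) * seconds then
            # gives the expected number of spikes per tick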
            spikes_per_tick = \
                (float(rate_val) * (self._machine_time_step / 1000000.0))
            if spikes_per_tick <= SLOW_RATE_PER_TICK_CUTOFF:
                slow_sources.append([i, rate_val, start_val, end_val])
            else:
                fast_sources.append([i, spikes_per_tick, start_val, end_val])

        # Write the numbers of each type of source
        spec.write_value(data=len(slow_sources))
        spec.write_value(data=len(fast_sources))

        # Now write one struct for each slow source as follows
        #
        #   typedef struct slow_spike_source_t
        #   {
        #     uint32_t neuron_id;
        #     uint32_t start_ticks;
        #     uint32_t end_ticks;
        #
        #     accum mean_isi_ticks;
        #     accum time_to_spike_ticks;
        #   } slow_spike_source_t;
        for (neuron_id, rate_val, start_val, end_val) in slow_sources:
            if rate_val == 0:
                isi_val = 0
            else:
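                # mean inter-spike interval in ticks: ticks per second
                # (1e6 / timestep in us) divided by the rate in Hz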
                isi_val = float(1000000.0 /
                                (rate_val * self._machine_time_step))
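            # start/end times are in ms and the timestep in us, hence the
            # factor of 1000 when converting to ticks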
            start_scaled = int(start_val * 1000.0 / self._machine_time_step)
            end_scaled = 0xFFFFFFFF
            if end_val is not None:
                end_scaled = int(end_val * 1000.0 / self._machine_time_step)
            spec.write_value(data=neuron_id, data_type=DataType.UINT32)
            spec.write_value(data=start_scaled, data_type=DataType.UINT32)
            spec.write_value(data=end_scaled, data_type=DataType.UINT32)
            spec.write_value(data=isi_val, data_type=DataType.S1615)
            spec.write_value(data=0x0, data_type=DataType.UINT32)

        # Now write
        #   typedef struct fast_spike_source_t
        #   {
        #     uint32_t neuron_id;
        #     uint32_t start_ticks;
        #     uint32_t end_ticks;
        #
        #     unsigned long fract exp_minus_lambda;
        #   } fast_spike_source_t;
        for (neuron_id, spikes_per_tick, start_val, end_val) in fast_sources:
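            # exp(-lambda) is the Poisson probability of emitting no spike
            # in a tick; presumably used on-core when drawing per-tick
            # spike counts for fast sources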
            if spikes_per_tick == 0:
                exp_minus_lambda = 0
            else:
                exp_minus_lambda = math.exp(-1.0 * spikes_per_tick)
            start_scaled = int(start_val * 1000.0 / self._machine_time_step)
            end_scaled = 0xFFFFFFFF
            if end_val is not None:
                end_scaled = int(end_val * 1000.0 / self._machine_time_step)
            spec.write_value(data=neuron_id, data_type=DataType.UINT32)
            spec.write_value(data=start_scaled, data_type=DataType.UINT32)
            spec.write_value(data=end_scaled, data_type=DataType.UINT32)
            spec.write_value(data=exp_minus_lambda, data_type=DataType.U032)

    def is_recording_spikes(self):
        return self._spike_recorder.record

    def set_recording_spikes(self):
        self._spike_recorder.record = True

    # inherited from partitionable vertex
    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        """
        Method for calculating SDRAM usage for a collection of atoms.
        :param vertex_slice: the slice of atoms
        :param graph: the partitionable graph
        :return: the SDRAM usage in bytes
        """
        poisson_params_sz = self.get_params_bytes(vertex_slice)
        spike_hist_buff_sz = \
            self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
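        # basic setup words + 8 bytes for the recording flags word and the
        # recording region size word, plus the parameter and history regions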
        return ((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) + 8 +
                poisson_params_sz + spike_hist_buff_sz)

    def get_dtcm_usage_for_atoms(self, vertex_slice, graph):
        """
        Method for calculating DTCM usage for a collection of atoms.
        :param vertex_slice: the slice of atoms
        :param graph: the partitionable graph
        :return: the DTCM usage in bytes
        """
        return 0

    def get_cpu_usage_for_atoms(self, vertex_slice, graph):
        """
        Gets the CPU requirements for a range of atoms.
        :param vertex_slice: the slice of atoms
        :param graph: the partitionable graph
        :return: the number of CPU cycles used
        """
        return 0

    # inherited from dataspecable vertex
    def generate_data_spec(self, subvertex, placement, subgraph, graph,
                           routing_info, hostname, graph_mapper, report_folder,
                           ip_tags, reverse_ip_tags, write_text_specs,
                           application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single SpikeSourcePoisson on one core.
        :param subvertex:
        :param placement:
        :param subgraph:
        :param graph:
        :param routing_info:
        :param hostname:
        :param graph_mapper:
        :param report_folder:
        :param ip_tags:
        :param reverse_ip_tags:
        :param write_text_specs:
        :param application_run_time_folder:
        :return:
        """
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)

        spec = DataSpecificationGenerator(data_writer, report_writer)

        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        spike_hist_buff_sz = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)

        spec.comment("\n*** Spec for SpikeSourcePoisson Instance ***\n\n")

        # Basic setup plus 8 bytes for recording flags and recording size
        setup_sz = ((constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) + 8)

        poisson_params_sz = self.get_params_bytes(vertex_slice)

        # Reserve SDRAM space for memory areas:
        self.reserve_memory_regions(
            spec, setup_sz, poisson_params_sz, spike_hist_buff_sz)

        self.write_setup_info(spec, spike_hist_buff_sz)

        # Every subedge should have the same key
        key = None
        subedges = subgraph.outgoing_subedges_from_subvertex(subvertex)
        if len(subedges) > 0:
            keys_and_masks = routing_info.get_keys_and_masks_from_subedge(
                subedges[0])
            key = keys_and_masks[0].key

        self.write_poisson_parameters(spec, key, vertex_slice.n_atoms)

        # End-of-Spec:
        spec.end_specification()
        data_writer.close()

    def get_binary_file_name(self):
        """
        :return: the name of the binary to load for this vertex
        """
        return "spike_source_poisson.aplx"

    def get_spikes(self, transceiver, n_machine_time_steps, placements,
                   graph_mapper):
        return self._spike_recorder.get_spikes(
            self._label, transceiver,
            self._POISSON_SPIKE_SOURCE_REGIONS.SPIKE_HISTORY_REGION.value,
            n_machine_time_steps, placements, graph_mapper, self)

    def get_outgoing_edge_constraints(self, partitioned_edge, graph_mapper):
        """
        Gets the constraints for edges going out of this vertex.
        :param partitioned_edge: the partitioned edge that leaves this vertex
        :param graph_mapper: the graph mapper object
        :return: list of constraints
        """
        return self._outgoing_edge_key_restrictor\
            .get_outgoing_edge_constraints(partitioned_edge, graph_mapper)

    def is_data_specable(self):
        """
        Helper method used for isinstance checks.
        :return: True, as this vertex generates a data specification
        """
        return True

    def get_value(self, key):
        """ Get a property of the overall model
        """
        if hasattr(self, key):
            return getattr(self, key)
        raise Exception("Population {} does not have parameter {}".format(
            self, key))
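
A minimal construction sketch for the SpikeSourcePoisson variant above (the values are illustrative assumptions, not taken from the snippets; machine_time_step is in microseconds, so 1000 gives a 1 ms timestep):

    # hypothetical example values
    poisson = SpikeSourcePoisson(
        n_neurons=100, machine_time_step=1000, timescale_factor=1,
        label="poisson_input", rate=10.0, start=0.0, duration=5000.0,
        seed=42)
    poisson.set_recording_spikes()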