def add_database_socket_address(
        database_notify_host, database_notify_port_num,
        database_ack_port_num):
    """Register a database-notification socket address.

    Any argument given as ``None`` is filled in from the ``[Database]``
    section of the simulator configuration; a notify host of ``"0.0.0.0"``
    is rewritten to ``"localhost"``.
    """
    cfg = get_simulator().config

    # Resolve the notify host first, falling back to configuration.
    if database_notify_host is None:
        database_notify_host = helpful_functions.read_config(
            cfg, "Database", "notify_hostname")
    elif database_notify_host == "0.0.0.0":
        database_notify_host = "localhost"

    # Resolve the two port numbers, falling back to configuration.
    if database_notify_port_num is None:
        database_notify_port_num = helpful_functions.read_config_int(
            cfg, "Database", "notify_port")
    if database_ack_port_num is None:
        database_ack_port_num = helpful_functions.read_config_int(
            cfg, "Database", "listen_port")

    # Hand the assembled address to the notification interface.
    SpynnakerExternalDevicePluginManager.add_socket_address(
        SocketAddress(
            listen_port=database_ack_port_num,
            notify_host_name=database_notify_host,
            notify_port_no=database_notify_port_num))
# Esempio n. 2 (Example 2 — snippet separator from the original scrape; rating: 0)
    def add_database_socket_address(database_notify_host,
                                    database_notify_port_num,
                                    database_ack_port_num):
        """ Add a socket address to the notification interface.

        Any argument given as None is filled in from the [Database]
        section of the simulator configuration; a notify host of
        "0.0.0.0" is mapped to "localhost".

        :param database_notify_host: host to notify when ready, or None
        :param database_notify_port_num: port to notify, or None
        :param database_ack_port_num: port on which to listen for the \
            start acknowledgement, or None
        """
        config = get_simulator().config
        # Fall back to configured values for anything not supplied.
        if database_notify_port_num is None:
            database_notify_port_num = helpful_functions.read_config_int(
                config, "Database", "notify_port")
        if database_notify_host is None:
            database_notify_host = helpful_functions.read_config(
                config, "Database", "notify_hostname")
        elif database_notify_host == "0.0.0.0":
            # The wildcard address cannot be connected to; use loopback.
            database_notify_host = "localhost"
        if database_ack_port_num is None:
            database_ack_port_num = helpful_functions.read_config_int(
                config, "Database", "listen_port")

        # build the database socket address used by the notification interface
        database_socket = SocketAddress(
            listen_port=database_ack_port_num,
            notify_host_name=database_notify_host,
            notify_port_no=database_notify_port_num)

        # update socket interface with new demands.
        SpynnakerExternalDevicePluginManager.add_socket_address(
            database_socket)
# Esempio n. 3 (Example 3 — snippet separator from the original scrape; rating: 0)
    def __init__(self,
                 n_neurons,
                 constraints=none_pynn_default_parameters['constraints'],
                 label=none_pynn_default_parameters['label'],
                 rate=default_parameters['rate'],
                 start=default_parameters['start'],
                 duration=default_parameters['duration'],
                 seed=none_pynn_default_parameters['seed']):
        """ Initialise a Poisson spike source application vertex.

        :param n_neurons: number of atoms (neurons) in this vertex
        :param constraints: placement constraints, if any
        :param label: label for the vertex
        :param rate: firing rate(s); scalar or one value per neuron
        :param start: start time(s) of spiking; scalar or per neuron
        :param duration: duration(s) of spiking; scalar or per neuron
        :param seed: seed for the random number generator, or None
        """
        ApplicationVertex.__init__(self, label, constraints,
                                   self._model_based_max_atoms_per_core)
        AbstractSpikeRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        SimplePopulationSettable.__init__(self)
        ProvidesKeyToAtomMappingImpl.__init__(self)

        config = globals_variables.get_simulator().config

        # atoms params
        self._n_atoms = n_neurons
        # NOTE(review): the seed argument only feeds the RandomState below;
        # _seed itself is explicitly cleared here.
        self._seed = None

        # check for changes parameters
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Store the parameters, expanded to one value per neuron
        self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
        self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
        self._duration = utility_calls.convert_param_to_numpy(
            duration, n_neurons)
        self._time_to_spike = utility_calls.convert_param_to_numpy(
            0, n_neurons)
        self._rng = numpy.random.RandomState(seed)
        self._machine_time_step = None

        # Prepare for recording, and to get spikes
        self._spike_recorder = MultiSpikeRecorder()
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")

        # Buffered-recording sizes are only read when recording is enabled
        spike_buffer_max_size = 0
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            spike_buffer_max_size = config.getint("Buffers",
                                                  "spike_buffer_size")
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._maximum_sdram_for_buffering = [spike_buffer_max_size]
    def __init__(self,
                 n_neurons,
                 constraints=non_pynn_default_parameters['constraints'],
                 label=non_pynn_default_parameters['label'],
                 rate=default_parameters['rate'],
                 start=default_parameters['start'],
                 duration=default_parameters['duration'],
                 seed=non_pynn_default_parameters['seed']):
        """ Initialise a SpikeSourcePoisson application vertex.

        :param n_neurons: number of atoms (neurons) in this vertex
        :param constraints: placement constraints, if any
        :param label: label for the vertex
        :param rate: firing rate(s); scalar or one value per neuron
        :param start: start time(s) of spiking; scalar or per neuron
        :param duration: duration(s) of spiking; scalar or per neuron
        :param seed: seed for the random number generator, or None
        """
        # pylint: disable=too-many-arguments
        super(SpikeSourcePoisson,
              self).__init__(label, constraints,
                             self._model_based_max_atoms_per_core)

        config = globals_variables.get_simulator().config

        # atoms params
        self._n_atoms = n_neurons
        self._model_name = "SpikeSourcePoisson"
        # NOTE(review): the seed argument only feeds the RandomState below;
        # _seed itself is explicitly cleared here.
        self._seed = None

        # check for changes parameters
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Store the parameters, expanded to one value per neuron
        self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
        self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
        self._duration = utility_calls.convert_param_to_numpy(
            duration, n_neurons)
        self._time_to_spike = utility_calls.convert_param_to_numpy(
            0, n_neurons)
        self._rng = numpy.random.RandomState(seed)
        self._machine_time_step = None

        # Prepare for recording, and to get spikes
        self._spike_recorder = MultiSpikeRecorder()
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")

        # Buffered-recording sizes are only read when recording is enabled
        spike_buffer_max_size = 0
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            spike_buffer_max_size = config.getint("Buffers",
                                                  "spike_buffer_size")
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._maximum_sdram_for_buffering = [spike_buffer_max_size]
    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """ Initialise an application vertex for a population of neurons.

        :param n_neurons: number of neurons (atoms) in the population
        :param label: label of the vertex
        :param constraints: placement constraints, if any
        :param max_atoms_per_core: maximum atoms per SpiNNaker core
        :param spikes_per_second: expected spike rate, or None
        :param ring_buffer_sigma: ring buffer sizing parameter, or None
        :param incoming_spike_buffer_size: size of the incoming spike \
            buffer, or None to read it from configuration
        :param neuron_impl: the neuron implementation object
        :param pynn_model: the PyNN model this vertex works on behalf of
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        self.__n_atoms = n_neurons
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # buffer data
        self.__incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # Fall back to the configured buffer size when none was given
        if incoming_spike_buffer_size is None:
            self.__incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self.__neuron_impl = neuron_impl
        self.__pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        self.__neuron_impl.add_parameters(self._parameters)
        self.__neuron_impl.add_state_variables(self._state_variables)
        self.__initial_state_variables = None
        self.__updated_state_variables = set()

        # Set up for recording: variables and data types come from the
        # neuron implementation; spikes are always recordable
        recordable_variables = list(
            self.__neuron_impl.get_recordable_variables())
        record_data_types = dict(
            self.__neuron_impl.get_recordable_data_types())
        self.__neuron_recorder = NeuronRecorder(recordable_variables,
                                                record_data_types,
                                                [NeuronRecorder.SPIKES],
                                                n_neurons)

        # Set up synapse handling
        self.__synapse_manager = SynapticManager(
            self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self.__change_requires_mapping = True
        self.__change_requires_neuron_parameters_reload = False
        self.__change_requires_data_generation = False
        self.__has_reset_last = True

        # Set up for profiling
        self.__n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")
    def get_resources(
            n_machine_time_steps, time_step, time_scale_factor,
            n_samples_per_recording, sampling_frequency):
        """ Get the resources used by this vertex

        :param n_machine_time_steps: \
            length of the run in machine time steps, or None when using \
            auto pause and resume
        :param time_step: the machine time step
        :param time_scale_factor: the time scale factor
        :param n_samples_per_recording: \
            number of samples gathered per recording entry
        :param sampling_frequency: how often samples are taken
        :return: Resource container
        """
        # pylint: disable=too-many-locals

        # get config
        config = globals_variables.get_simulator().config

        # get recording params
        minimum_buffer_sdram = config.getint(
            "Buffers", "minimum_buffer_sdram")
        using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        receive_buffer_host = config.get("Buffers", "receive_buffer_host")
        receive_buffer_port = read_config_int(
            config, "Buffers", "receive_buffer_port")

        # Without auto pause and resume, a fixed run length is required
        # to size the recording regions.
        if not using_auto_pause_and_resume and n_machine_time_steps is None:
            raise Exception(
                "You cannot use the chip power monitors without auto pause "
                "and resume and not allocating a n_machine_time_steps")

        # figure max buffer size (only used when buffered recording is on)
        max_buffer_size = 0
        if config.getboolean("Buffers", "enable_buffered_recording"):
            max_buffer_size = config.getint(
                "Buffers", "chip_power_monitor_buffer")

        maximum_sdram_for_buffering = [max_buffer_size]

        # number of recording entries per machine time step
        n_recording_entries = (math.ceil(
            (sampling_frequency / (time_step * time_scale_factor))) /
            n_samples_per_recording)

        recording_size = (
            ChipPowerMonitorMachineVertex.RECORDING_SIZE_PER_ENTRY *
            n_recording_entries)

        container = ResourceContainer(
            sdram=SDRAMResource(
                ChipPowerMonitorMachineVertex.sdram_calculation()),
            cpu_cycles=CPUCyclesPerTickResource(100),
            dtcm=DTCMResource(100))
        recording_sizes = recording_utilities.get_recording_region_sizes(
            [int(recording_size) * n_machine_time_steps], minimum_buffer_sdram,
            maximum_sdram_for_buffering, using_auto_pause_and_resume)
        container.extend(recording_utilities.get_recording_resources(
            recording_sizes, receive_buffer_host, receive_buffer_port))
        return container
# Esempio n. 7 (Example 7 — snippet separator from the original scrape; rating: 0)
    def __init__(self, n_neurons, constraints, label, rate, start, duration,
                 seed, max_atoms_per_core, model):
        """ Initialise a SpikeSourcePoissonVertex.

        :param n_neurons: number of atoms (neurons) in this vertex
        :param constraints: placement constraints, if any
        :param label: label for the vertex
        :param rate: firing rate(s); scalar or one value per neuron
        :param start: start time(s) of spiking; scalar or per neuron
        :param duration: duration(s) of spiking; scalar or per neuron
        :param seed: seed value stored for later RNG construction, or None
        :param max_atoms_per_core: maximum atoms per SpiNNaker core
        :param model: the PyNN model this vertex works on behalf of
        """
        # pylint: disable=too-many-arguments
        super(SpikeSourcePoissonVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        config = globals_variables.get_simulator().config

        # atoms params
        self._n_atoms = n_neurons
        self._model_name = "SpikeSourcePoisson"
        self._model = model
        self._seed = seed
        # NOTE(review): the RNG is created lazily; only the seed is kept here
        self._kiss_seed = dict()
        self._rng = None
        self._n_subvertices = 0
        self._n_data_specs = 0

        # check for changes parameters
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Store the parameters, expanded to one value per neuron
        self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
        self._rate_change = numpy.zeros(self._rate.size)
        self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
        self._duration = utility_calls.convert_param_to_numpy(
            duration, n_neurons)
        self._time_to_spike = utility_calls.convert_param_to_numpy(
            0, n_neurons)
        self._machine_time_step = None

        # Prepare for recording, and to get spikes
        self._spike_recorder = MultiSpikeRecorder()
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")

        # Buffered-recording sizes are only read when recording is enabled
        spike_buffer_max_size = 0
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            spike_buffer_max_size = config.getint("Buffers",
                                                  "spike_buffer_size")
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._maximum_sdram_for_buffering = [spike_buffer_max_size]
    def get_resources(n_machine_time_steps, time_step, time_scale_factor,
                      n_samples_per_recording, sampling_frequency):
        """ Get the resources used by this vertex

        :param n_machine_time_steps: \
            length of the run in machine time steps, or None when using \
            auto pause and resume
        :param time_step: the machine time step
        :param time_scale_factor: the time scale factor
        :param n_samples_per_recording: \
            number of samples gathered per recording entry
        :param sampling_frequency: how often samples are taken
        :return: Resource container
        """
        # pylint: disable=too-many-locals

        # get config
        config = globals_variables.get_simulator().config

        # get recording params
        minimum_buffer_sdram = config.getint("Buffers", "minimum_buffer_sdram")
        using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        receive_buffer_host = config.get("Buffers", "receive_buffer_host")
        receive_buffer_port = read_config_int(config, "Buffers",
                                              "receive_buffer_port")

        # Without auto pause and resume, a fixed run length is required
        # to size the recording regions.
        if not using_auto_pause_and_resume and n_machine_time_steps is None:
            raise Exception(
                "You cannot use the chip power monitors without auto pause "
                "and resume and not allocating a n_machine_time_steps")

        # figure max buffer size (only used when buffered recording is on)
        max_buffer_size = 0
        if config.getboolean("Buffers", "enable_buffered_recording"):
            max_buffer_size = config.getint("Buffers",
                                            "chip_power_monitor_buffer")

        maximum_sdram_for_buffering = [max_buffer_size]

        # number of recording entries per machine time step
        n_recording_entries = (math.ceil(
            (sampling_frequency / (time_step * time_scale_factor))) /
                               n_samples_per_recording)

        recording_size = (
            ChipPowerMonitorMachineVertex.RECORDING_SIZE_PER_ENTRY *
            n_recording_entries)

        container = ResourceContainer(sdram=SDRAMResource(
            ChipPowerMonitorMachineVertex.sdram_calculation()),
                                      cpu_cycles=CPUCyclesPerTickResource(100),
                                      dtcm=DTCMResource(100))
        recording_sizes = recording_utilities.get_recording_region_sizes(
            [int(recording_size) * n_machine_time_steps], minimum_buffer_sdram,
            maximum_sdram_for_buffering, using_auto_pause_and_resume)
        container.extend(
            recording_utilities.get_recording_resources(
                recording_sizes, receive_buffer_host, receive_buffer_port))
        return container
    def add_database_socket_address(database_notify_host,
                                    database_notify_port_num,
                                    database_ack_port_num):
        """ Add a socket address to the notification interface.

        Arguments given as None are filled in from the [Database] section
        of the simulator configuration.

        :param database_notify_host:
            Host to talk to tell that the database (and application) is ready.
        :type database_notify_host: str or None
        :param database_notify_port_num:
            Port to talk to tell that the database (and application) is ready.
        :type database_notify_port_num: int or None
        :param database_ack_port_num:
            Port on which to listen for an acknowledgement that the
            simulation should start.
        :type database_ack_port_num: int or None
        """
        config = get_simulator().config
        # Fall back to configured values for anything not supplied.
        if database_notify_port_num is None:
            database_notify_port_num = helpful_functions.read_config_int(
                config, "Database", "notify_port")
        if database_notify_host is None:
            database_notify_host = helpful_functions.read_config(
                config, "Database", "notify_hostname")
        elif database_notify_host == "0.0.0.0":
            # The wildcard address cannot be connected to; use loopback.
            database_notify_host = "localhost"
        if database_ack_port_num is None:
            database_ack_port_num = helpful_functions.read_config_int(
                config, "Database", "listen_port")

        # build the database socket address used by the notification interface
        database_socket = SocketAddress(
            listen_port=database_ack_port_num,
            notify_host_name=database_notify_host,
            notify_port_no=database_notify_port_num)

        # update socket interface with new demands.
        SpynnakerExternalDevicePluginManager.add_socket_address(
            database_socket)
    def __init__(self, label, state):
        """Create the machine vertex and cache buffering configuration.

        :param label: label for the machine vertex
        :param state: application-specific state stored on the vertex
        """
        MachineVertex.__init__(self, label)

        cfg = globals_variables.get_simulator().config

        # Buffered-recording settings, all read from the [Buffers] section.
        buffered = cfg.getboolean("Buffers", "enable_buffered_recording")
        self._buffer_size_before_receive = (
            cfg.getint("Buffers", "buffer_size_before_receive")
            if buffered else None)
        self._time_between_requests = cfg.getint(
            "Buffers", "time_between_requests")
        self._receive_buffer_host = cfg.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            cfg, "Buffers", "receive_buffer_port")

        # Application-specific state.
        self._state = state
# Esempio n. 11 (Example 11 — snippet separator from the original scrape; rating: 0)
    def __init__(self, label, state):
        """ Create a Conway cell machine vertex.

        :param label: label for the vertex
        :param state: initial cell state; coerced to bool below
        """
        super(ConwayBasicCell, self).__init__(label, "conways_cell.aplx")

        config = globals_variables.get_simulator().config
        # Buffered-recording settings from the [Buffers] section
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = read_config_int(config, "Buffers",
                                                    "receive_buffer_port")

        # app specific data items
        self._state = bool(state)
    def __init__(
            self, n_neurons, label, constraints, max_atoms_per_core,
            spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size,
            neuron_impl, pynn_model):
        """ Initialise an application vertex for a population of neurons.

        :param n_neurons: number of neurons (atoms) in the population
        :param label: label of the vertex
        :param constraints: placement constraints, if any
        :param max_atoms_per_core: maximum atoms per SpiNNaker core
        :param spikes_per_second: expected spike rate, or None
        :param ring_buffer_sigma: ring buffer sizing parameter, or None
        :param incoming_spike_buffer_size: size of the incoming spike \
            buffer, or None to read it from configuration
        :param neuron_impl: the neuron implementation object
        :param pynn_model: the PyNN model this vertex works on behalf of
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(
            label, constraints, max_atoms_per_core)

        self._n_atoms = n_neurons
        self._n_subvertices = 0
        self._n_data_specs = 0

        # buffer data
        self._incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # Fall back to the configured buffer size when none was given
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        self._neuron_impl = neuron_impl
        self._pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        self._neuron_impl.add_parameters(self._parameters)
        self._neuron_impl.add_state_variables(self._state_variables)

        # Set up for recording: spikes plus whatever the neuron
        # implementation declares as recordable
        recordables = ["spikes"]
        recordables.extend(self._neuron_impl.get_recordable_variables())
        self._neuron_recorder = NeuronRecorder(recordables, n_neurons)

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            self._neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Set up for profiling
        self._n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")
# Esempio n. 13 (Example 13 — snippet separator from the original scrape; rating: 0)
    def get_resources(n_machine_time_steps, time_step, time_scale_factor,
                      n_samples_per_recording, sampling_frequency):
        """ Get the resources used by this vertex.

        :param n_machine_time_steps: length of the run in machine time \
            steps, or None (treated as 1 when sizing the recording)
        :param time_step: the machine time step
        :param time_scale_factor: the time scale factor
        :param n_samples_per_recording: samples gathered per recording entry
        :param sampling_frequency: how often samples are taken
        :return: Resource container
        """
        # get config
        config = globals_variables.get_simulator().config

        # get recording params
        minimum_buffer_sdram = config.getint("Buffers", "minimum_buffer_sdram")
        using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        receive_buffer_host = config.get("Buffers", "receive_buffer_host")
        receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")

        # figure max buffer size (only used when buffered recording is on)
        max_buffer_size = 0
        if config.getboolean("Buffers", "enable_buffered_recording"):
            max_buffer_size = config.getint("Buffers", "spike_buffer_size")
        maximum_sdram_for_buffering = [max_buffer_size]

        # figure recording size for max run
        if n_machine_time_steps is None:
            n_machine_time_steps = 1.
        n_recording_entries = math.ceil(
            (sampling_frequency /
             (n_machine_time_steps * time_step * time_scale_factor)) /
            n_samples_per_recording)
        recording_size = (
            ChipPowerMonitorMachineVertex.RECORDING_SIZE_PER_ENTRY *
            n_recording_entries)

        container = ResourceContainer(sdram=SDRAMResource(
            ChipPowerMonitorMachineVertex.sdram_calculation()),
                                      cpu_cycles=CPUCyclesPerTickResource(100),
                                      dtcm=DTCMResource(100))
        recording_sizes = recording_utilities.get_recording_region_sizes(
            [int(recording_size)], n_machine_time_steps, minimum_buffer_sdram,
            maximum_sdram_for_buffering, using_auto_pause_and_resume)
        container.extend(
            recording_utilities.get_recording_resources(
                recording_sizes, receive_buffer_host, receive_buffer_port))
        return container
# Esempio n. 14 (Example 14 — snippet separator from the original scrape; rating: 0)
    def __init__(self, label, constraints=None):
        """Create the vertex and read buffering options from configuration.

        :param label: label for the machine vertex
        :param constraints: optional placement constraints
        """
        MachineVertex.__init__(self, label=label, constraints=constraints)

        sim_config = globals_variables.get_simulator().config

        # [Buffers] settings controlling buffered recording.
        self._buffer_size_before_receive = None
        if sim_config.getboolean("Buffers", "enable_buffered_recording"):
            self._buffer_size_before_receive = sim_config.getint(
                "Buffers", "buffer_size_before_receive")
        self._time_between_requests = sim_config.getint(
            "Buffers", "time_between_requests")
        self._receive_buffer_host = sim_config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            sim_config, "Buffers", "receive_buffer_port")

        # Fixed size reserved for string data.
        self._string_data_size = 5000

        # Filled in once the vertex has been placed.
        self.placement = None
# Esempio n. 15 (Example 15 — snippet separator from the original scrape; rating: 0)
    def __init__(self,
                 label,
                 columns,
                 rows,
                 string_size,
                 num_string_cols,
                 entries,
                 initiate,
                 function_id,
                 state,
                 constraints=None):
        """ Create the vertex and size its input/output data regions.

        :param label: label for the machine vertex
        :param columns: total number of columns in the table
        :param rows: number of rows in the table
        :param string_size: size in bytes of each string cell
        :param num_string_cols: how many of the columns hold strings
        :param entries: the table entries to transfer
        :param initiate: initiation flag passed to the vertex
        :param function_id: identifier of the function to run
        :param state: application-specific state
        :param constraints: optional placement constraints
        """
        MachineVertex.__init__(self, label=label, constraints=constraints)

        config = globals_variables.get_simulator().config
        # Buffered-recording settings from the [Buffers] section
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")

        # All the data that will be transferred to the vertex
        self.columns = columns
        self.rows = rows
        self.string_size = string_size
        self.num_string_cols = num_string_cols
        self.entries = entries
        self.initiate = initiate
        self.function_id = function_id

        # Allocate space for the entries (string cells plus 4-byte integer
        # cells) and 28 bytes of header.  NOTE(review): the original
        # comment said "24 bytes for the 6 integers" but the code adds 28
        # (7 words) — confirm against the C side.
        self._input_data_size = (
            (string_size * rows * num_string_cols) +
            (4 * rows * (columns - num_string_cols)) + 28)
        self._output_data_size = 10 * 1000

        # app specific elements
        self.placement = None
        self.state = state
    def __init__(self, label, constraints=None):
        """ Create a hello-world machine vertex.

        :param label: label for the vertex
        :param constraints: optional placement constraints
        """
        super(HelloWorldVertex, self).__init__(label,
                                               "hello_world.aplx",
                                               constraints=constraints)

        config = globals_variables.get_simulator().config
        # Buffered-recording settings from the [Buffers] section
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = read_config_int(config, "Buffers",
                                                    "receive_buffer_port")

        # Fixed size reserved for the string data region
        self._string_data_size = 5000

        # Filled in once the vertex has been placed
        self.placement = None
    def __init__(self, label, constraints=None):
        """ Create a template machine vertex.

        :param label: label for the vertex
        :param constraints: optional placement constraints
        """
        super(TemplateVertex,
              self).__init__(label=label,
                             binary_name="c_template_vertex.aplx",
                             constraints=constraints)

        # Fixed size reserved for the recording region
        self._recording_size = 5000

        config = globals_variables.get_simulator().config
        # Buffered-recording settings from the [Buffers] section
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = read_config_int(config, "Buffers",
                                                    "receive_buffer_port")

        # Filled in once the vertex has been placed
        self.placement = None
    def __init__(self, n_neurons, constraints, label, rate, max_rate, start,
                 duration, seed, max_atoms_per_core, model):
        """ Initialise a SpikeSourcePoissonVertex.

        :param n_neurons: number of atoms (neurons) in this vertex
        :param constraints: placement constraints, if any
        :param label: label for the vertex
        :param rate: firing rate(s), converted via convert_rate below
        :param max_rate: the maximum rate of any neuron
        :param start: start time(s) of spiking; scalar or per neuron
        :param duration: duration(s) of spiking; scalar or per neuron
        :param seed: seed value stored for later RNG construction, or None
        :param max_atoms_per_core: maximum atoms per SpiNNaker core
        :param model: the PyNN model this vertex works on behalf of
        """
        # pylint: disable=too-many-arguments
        super(SpikeSourcePoissonVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        # atoms params
        self.__n_atoms = n_neurons
        self.__model_name = "SpikeSourcePoisson"
        self.__model = model
        self.__seed = seed
        self.__kiss_seed = dict()
        self.__rng = None
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # check for changes parameters
        self.__change_requires_mapping = True
        self.__change_requires_neuron_parameters_reload = False

        # Store the parameters, expanded to one value per neuron
        self.__max_rate = max_rate
        self.__rate = self.convert_rate(rate)
        self.__rate_change = numpy.zeros(self.__rate.size)
        self.__start = utility_calls.convert_param_to_numpy(start, n_neurons)
        self.__duration = utility_calls.convert_param_to_numpy(
            duration, n_neurons)
        self.__time_to_spike = utility_calls.convert_param_to_numpy(
            0, n_neurons)
        self.__machine_time_step = None

        # get config from simulator
        config = globals_variables.get_simulator().config
        self.__n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")

        # Prepare for recording, and to get spikes (assigned once; a
        # redundant duplicate assignment was removed)
        self.__spike_recorder = MultiSpikeRecorder()
    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """
        :param int n_neurons: The number of neurons in the population
        :param str label: The label on the population
        :param list(~pacman.model.constraints.AbstractConstraint) constraints:
            Constraints on where a population's vertices may be placed.
        :param int max_atoms_per_core:
            The maximum number of atoms (neurons) per SpiNNaker core.
        :param spikes_per_second: Expected spike rate
        :type spikes_per_second: float or None
        :param ring_buffer_sigma:
            How many SD above the mean to go for upper bound of ring buffer \
            size; a good starting choice is 5.0. Given length of simulation \
            we can set this for approximate number of saturation events.
        :type ring_buffer_sigma: float or None
        :param incoming_spike_buffer_size:
            Size of the incoming spike buffer; if None, the
            "Simulation/incoming_spike_buffer_size" configuration value is
            used instead.
        :type incoming_spike_buffer_size: int or None
        :param AbstractNeuronImpl neuron_impl:
            The (Python side of the) implementation of the neurons themselves.
        :param AbstractPyNNNeuronModel pynn_model:
            The PyNN neuron model that this vertex is working on behalf of.
        """

        # pylint: disable=too-many-arguments, too-many-locals
        ApplicationVertex.__init__(self, label, constraints,
                                   max_atoms_per_core)

        # atom count and per-run data-specification bookkeeping
        self.__n_atoms = n_neurons
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # buffer data
        self.__incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # fall back to the configured default when no size was supplied
        if incoming_spike_buffer_size is None:
            self.__incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        # register this population's parameters and state variables with the
        # neuron implementation, sized to the whole population
        self.__neuron_impl = neuron_impl
        self.__pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        self.__neuron_impl.add_parameters(self._parameters)
        self.__neuron_impl.add_state_variables(self._state_variables)
        self.__initial_state_variables = None
        self.__updated_state_variables = set()

        # Set up for recording, using the variables and data types exposed
        # by the neuron implementation
        recordable_variables = list(
            self.__neuron_impl.get_recordable_variables())
        record_data_types = dict(
            self.__neuron_impl.get_recordable_data_types())
        self.__neuron_recorder = NeuronRecorder(recordable_variables,
                                                record_data_types,
                                                [NeuronRecorder.SPIKES],
                                                n_neurons)

        # Set up synapse handling
        self.__synapse_manager = SynapticManager(
            self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self.__change_requires_mapping = True
        self.__change_requires_neuron_parameters_reload = False
        self.__change_requires_data_generation = False
        self.__has_reset_last = True

        # Set up for profiling
        self.__n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")
# Example 20
    def __init__(self, n_neurons, binary, label, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, model_name, neuron_model,
                 input_type, synapse_type, threshold_type,
                 additional_input=None, constraints=None):
        """Set up the vertex: unit labels, recorder, buffering limits,
        synapse manager and profiling, filling any missing values from the
        simulator configuration.
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        # unit label for each recordable quantity
        self._units = {'spikes': 'spikes',
                       'v': 'mV',
                       'gsyn_exc': "uS",
                       'gsyn_inh': "uS"}

        self._binary = binary
        self._n_atoms = n_neurons

        # simulator configuration supplies all unset values below
        sim_config = globals_variables.get_simulator().config

        # caller may leave the incoming spike buffer size unset; in that
        # case take the configured default
        if incoming_spike_buffer_size is None:
            incoming_spike_buffer_size = sim_config.getint(
                "Simulation", "incoming_spike_buffer_size")
        self._incoming_spike_buffer_size = incoming_spike_buffer_size

        self._model_name = model_name
        self._neuron_model = neuron_model
        self._input_type = input_type
        self._threshold_type = threshold_type
        self._additional_input = additional_input

        # recorder for spikes plus the three analogue quantities
        self._neuron_recorder = NeuronRecorder(
            ["spikes", "v", "gsyn_exc", "gsyn_inh"], n_neurons)

        self._time_between_requests = sim_config.getint(
            "Buffers", "time_between_requests")
        self._minimum_buffer_sdram = sim_config.getint(
            "Buffers", "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = sim_config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        self._receive_buffer_host = sim_config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            sim_config, "Buffers", "receive_buffer_port")

        # When live buffering is enabled, cap each recording region's size;
        # otherwise the caps stay at zero and no pre-receive size is set.
        if sim_config.getboolean("Buffers", "enable_buffered_recording"):
            spike_cap = sim_config.getint("Buffers", "spike_buffer_size")
            v_cap = sim_config.getint("Buffers", "v_buffer_size")
            gsyn_cap = sim_config.getint("Buffers", "gsyn_buffer_size")
            self._buffer_size_before_receive = sim_config.getint(
                "Buffers", "buffer_size_before_receive")
        else:
            spike_cap = v_cap = gsyn_cap = 0
            self._buffer_size_before_receive = None

        # one maximum per recording region: spikes, v, gsyn_exc, gsyn_inh
        self._maximum_sdram_for_buffering = [
            spike_cap, v_cap, gsyn_cap, gsyn_cap]

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            synapse_type, ring_buffer_sigma, spikes_per_second, sim_config)

        # flags recording whether state has changed since the last run
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Set up for profiling
        self._n_profile_samples = helpful_functions.read_config_int(
            sim_config, "Reports", "n_profile_samples")
    def __init__(self, n_neurons, label, constraints, max_atoms_per_core,
                 spikes_per_second, ring_buffer_sigma,
                 incoming_spike_buffer_size, neuron_impl, pynn_model):
        """
        :param int n_neurons: The number of neurons in the population
        :param str label: The label on the population
        :param constraints:
            Constraints on where a population's vertices may be placed.
        :param int max_atoms_per_core:
            The maximum number of atoms (neurons) per SpiNNaker core.
        :param spikes_per_second: Expected spike rate
        :type spikes_per_second: float or None
        :param ring_buffer_sigma:
            Number of SD above the mean for the ring buffer upper bound.
        :type ring_buffer_sigma: float or None
        :param incoming_spike_buffer_size:
            Size of the incoming spike buffer; if None, the
            "Simulation/incoming_spike_buffer_size" configuration value is
            used instead.
        :type incoming_spike_buffer_size: int or None
        :param neuron_impl:
            The (Python side of the) implementation of the neurons.
        :param pynn_model:
            The PyNN neuron model that this vertex is working on behalf of.
        """
        # pylint: disable=too-many-arguments, too-many-locals
        super(AbstractPopulationVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        self._n_atoms = n_neurons

        # buffer data
        self._incoming_spike_buffer_size = incoming_spike_buffer_size

        # get config from simulator
        config = globals_variables.get_simulator().config

        # fall back to the configured default when no size was supplied
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")

        # register parameters and state variables with the neuron
        # implementation, sized to the whole population
        self._neuron_impl = neuron_impl
        self._pynn_model = pynn_model
        self._parameters = SpynnakerRangeDictionary(n_neurons)
        self._state_variables = SpynnakerRangeDictionary(n_neurons)
        self._neuron_impl.add_parameters(self._parameters)
        self._neuron_impl.add_state_variables(self._state_variables)

        # Set up for recording: spikes plus whatever variables the neuron
        # implementation exposes
        recordables = ["spikes"]
        recordables.extend(self._neuron_impl.get_recordable_variables())
        self._neuron_recorder = NeuronRecorder(recordables, n_neurons)

        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")

        # If live buffering is enabled, set a maximum on the buffer sizes
        spike_buffer_max_size = 0
        variable_buffer_max_size = 0
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            spike_buffer_max_size = config.getint("Buffers",
                                                  "spike_buffer_size")
            variable_buffer_max_size = config.getint("Buffers",
                                                     "variable_buffer_size")

        # one maximum per recording region: spikes first, then one per
        # recordable variable
        self._maximum_sdram_for_buffering = [spike_buffer_max_size]
        for _ in self._neuron_impl.get_recordable_variables():
            self._maximum_sdram_for_buffering.append(variable_buffer_max_size)

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            self._neuron_impl.get_n_synapse_types(), ring_buffer_sigma,
            spikes_per_second, config)

        # bool for if state has changed.
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Set up for profiling
        self._n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")
# Example 22
 def _read_config_int(self, section, item):
     return read_config_int(self._config, section, item)
# Example 23
    def __init__(
            self, network, dt=constants.DEFAULT_DT,
            time_scale=constants.DEFAULT_TIME_SCALE,
            host_name=None, graph_label=None,
            database_socket_addresses=None, dsg_algorithm=None,
            n_chips_required=None, extra_pre_run_algorithms=None,
            extra_post_run_algorithms=None, decoder_cache=NoDecoderCache(),
            function_of_time_nodes=None,
            function_of_time_nodes_time_period=None):
        """Create a new Simulator with the given network.

        :param network: The Nengo network model to simulate.
        :param dt: The length of a simulator timestep, in seconds.
        :type dt: float
        :param time_scale: Scaling factor to apply to the simulation, e.g.
            a value of `0.5` will cause the simulation to run at twice
            real-time.
        :type time_scale: float
        :param host_name: Hostname of the SpiNNaker machine to use; if None
            then the machine specified in the config file will be used.
        :type host_name: basestring or None
        :param graph_label: human readable graph label
        :type graph_label: basestring
        :param database_socket_addresses: addresses passed through to the
            underlying SpiNNaker simulator (presumably for notification
            database clients -- TODO confirm).
        :param dsg_algorithm: data-specification-generation algorithm name,
            passed through to the underlying SpiNNaker simulator.
        :param n_chips_required: number of chips required, passed through to
            the underlying SpiNNaker simulator.
        :param extra_pre_run_algorithms: extra algorithms to run before the
            main run, passed through to the underlying SpiNNaker simulator.
        :param extra_post_run_algorithms: extra algorithms to run after the
            main run, passed through to the underlying SpiNNaker simulator.
        :param decoder_cache: cache used when building decoders.
        :param function_of_time_nodes: Nengo nodes to treat as functions of
            time; defaults to an empty list.
        :param function_of_time_nodes_time_period: time periods for the
            function-of-time nodes; defaults to an empty list.
        :rtype: None
        """
        # maps populated later in the construction / mapping process
        self._nengo_object_to_data_map = dict()
        self._profiled_nengo_object_to_data_map = dict()
        self._nengo_to_app_graph_map = None
        self._app_graph_to_nengo_operator_map = None
        self._nengo_app_machine_graph_mapper = None

        # locate the compiled binaries shipped with this package
        executable_finder = ExecutableFinder()
        executable_finder.add_path(os.path.dirname(binaries.__file__))

        # Calculate the machine timestep, this is measured in microseconds
        # (hence the 1e6 scaling factor).
        machine_time_step = (
            int((dt / time_scale) *
                constants.SECONDS_TO_MICRO_SECONDS_CONVERTER))

        # extra mapping-algorithm XML descriptions for the Nengo flow
        xml_paths = list()
        xml_paths.append(os.path.join(os.path.dirname(
            overridden_mapping_algorithms.__file__),
            self.NENGO_ALGORITHM_XML_FILE_NAME))

        SpiNNaker.__init__(
            self, executable_finder, host_name=host_name,
            graph_label=graph_label,
            database_socket_addresses=database_socket_addresses,
            dsg_algorithm=dsg_algorithm,
            n_chips_required=n_chips_required,
            extra_pre_run_algorithms=extra_pre_run_algorithms,
            extra_post_run_algorithms=extra_post_run_algorithms,
            time_scale_factor=time_scale,
            default_config_paths=[(
                os.path.join(os.path.dirname(__file__),
                             self.CONFIG_FILE_NAME))],
            machine_time_step=machine_time_step,
            extra_xml_paths=xml_paths,
            chip_id_allocator="NengoMallocBasedChipIDAllocator")

        # only add the sdram edge allocator if not using a virtual board
        extra_mapping_algorithms = list()
        if not helpful_functions.read_config_boolean(
                self.config, "Machine", "virtual_board"):
            extra_mapping_algorithms.append(
                "NengoSDRAMOutgoingPartitionAllocator")

        if function_of_time_nodes is None:
            function_of_time_nodes = list()
        if function_of_time_nodes_time_period is None:
            function_of_time_nodes_time_period = list()

        # update the main flow with new algorithms and params
        self.extend_extra_mapping_algorithms(extra_mapping_algorithms)
        self.update_extra_inputs(
            {"UserCreateDatabaseFlag": True,
             'DefaultNotifyHostName': self.config.get_str(
                "Database", "notify_hostname"),
             'NengoNodesAsFunctionOfTime': function_of_time_nodes,
             'NengoNodesAsFunctionOfTimeTimePeriod':
                 function_of_time_nodes_time_period,
             'NengoModel': network,
             'NengoDecoderCache': decoder_cache,
             "NengoNodeIOSetting": self.config.get("Simulator", "node_io"),
             "NengoEnsembleProfile":
                 self.config.getboolean("Ensemble", "profile"),
             "NengoEnsembleProfileNumSamples":
                 helpful_functions.read_config_int(
                     self.config, "Ensemble", "profile_num_samples"),
             "NengoRandomNumberGeneratorSeed":
                helpful_functions.read_config_int(
                    self.config, "Simulator", "global_seed"),
             "NengoUtiliseExtraCoreForProbes":
                self.config.getboolean(
                    "Node", "utilise_extra_core_for_probes"),
             "MachineTimeStepInSeconds": dt,
             "ReceiveBufferPort": helpful_functions.read_config_int(
                self.config, "Buffers", "receive_buffer_port"),
             "ReceiveBufferHost": self.config.get(
                 "Buffers", "receive_buffer_host"),
             "MinBufferSize": self.config.getint(
                 "Buffers", "minimum_buffer_sdram"),
             "MaxSinkBuffingSize": self.config.getint(
                 "Buffers", "sink_vertex_max_sdram_for_buffing"),
             "UsingAutoPauseAndResume": self.config.getboolean(
                 "Buffers", "use_auto_pause_and_resume"),
             "TimeBetweenRequests": self.config.getint(
                 "Buffers", "time_between_requests"),
             "BufferSizeBeforeReceive": self.config.getint(
                 "Buffers", "buffer_size_before_receive"),
             "SpikeBufferMaxSize": self.config.getint(
                "Buffers", "spike_buffer_size"),
             "VariableBufferMaxSize": self.config.getint(
                "Buffers", "variable_buffer_size")})

        # build app graph, machine graph, as the main tools expect an
        # application / machine graph level, and cannot go from random to app
        #  graph.
        nengo_app_graph_generator = NengoApplicationGraphGenerator()

        (self._nengo_operator_graph, host_network,
         self._nengo_to_app_graph_map, self._app_graph_to_nengo_operator_map,
         random_number_generator) = \
            nengo_app_graph_generator(
            self._extra_inputs["NengoModel"], self.machine_time_step,
            self._extra_inputs["NengoRandomNumberGeneratorSeed"],
            self._extra_inputs["NengoDecoderCache"],
            self._extra_inputs["NengoUtiliseExtraCoreForProbes"],
            self._extra_inputs["NengoNodesAsFunctionOfTime"],
            self._extra_inputs["NengoNodesAsFunctionOfTimeTimePeriod"],
            self.config.getboolean("Node", "optimise_utilise_interposers"),
            self._print_timings, self._do_timings, self._xml_paths,
            self._pacman_executor_provenance_path,
            self._extra_inputs["NengoEnsembleProfile"],
            self._extra_inputs["NengoEnsembleProfileNumSamples"],
            self._extra_inputs["ReceiveBufferPort"],
            self._extra_inputs["ReceiveBufferHost"],
            self._extra_inputs["MinBufferSize"],
            self._extra_inputs["MaxSinkBuffingSize"],
            self._extra_inputs["UsingAutoPauseAndResume"],
            self._extra_inputs["TimeBetweenRequests"],
            self._extra_inputs["BufferSizeBeforeReceive"],
            self._extra_inputs["SpikeBufferMaxSize"],
            self._extra_inputs["VariableBufferMaxSize"],
            self._extra_inputs["MachineTimeStepInSeconds"])

        # add the extra outputs as new inputs
        self.update_extra_inputs(
            {"NengoHostGraph": host_network,
             "NengoGraphToAppGraphMap": self._nengo_to_app_graph_map,
             "AppGraphToNengoOperatorMap":
                 self._app_graph_to_nengo_operator_map,
             "NengoRandomNumberGenerator": random_number_generator,
             "NengoOperatorGraph": self._nengo_operator_graph})
# Example 24
    def __init__(
            self, n_neurons,
            spike_times=default_parameters['spike_times'],
            port=non_pynn_default_parameters['port'],
            tag=non_pynn_default_parameters['tag'],
            ip_address=non_pynn_default_parameters['ip_address'],
            board_address=non_pynn_default_parameters['board_address'],
            max_on_chip_memory_usage_for_spikes_in_bytes=DEFAULT1,
            space_before_notification=non_pynn_default_parameters[
                'space_before_notification'],
            constraints=non_pynn_default_parameters['constraints'],
            label=non_pynn_default_parameters['label'],
            spike_recorder_buffer_size=non_pynn_default_parameters[
                'spike_recorder_buffer_size'],
            buffer_size_before_receive=non_pynn_default_parameters[
                'buffer_size_before_receive']):
        """Create a spike source that sends a pre-defined set of spikes.

        :param n_neurons: number of neurons (spike sources) in the population
        :param spike_times: the times at which spikes are sent; None is
            treated as an empty list
        :param port: port for buffer notifications; if None, read from the
            "Buffers/receive_buffer_port" configuration value
        :param ip_address: host for buffer notifications; if None, read from
            the "Buffers/receive_buffer_host" configuration value
        :param max_on_chip_memory_usage_for_spikes_in_bytes: cap on the
            on-chip buffered region; if None, a library default is used
        :param space_before_notification: space left in the buffer before a
            notification is sent; clamped to the on-chip memory cap
        :raises ConfigurationException: if the requested on-chip memory
            usage is negative
        """
        # pylint: disable=too-many-arguments
        self._model_name = "SpikeSourceArray"

        # fill in host/port for buffer notifications from the simulator
        # configuration when the caller did not supply them
        config = globals_variables.get_simulator().config
        self._ip_address = ip_address
        if ip_address is None:
            self._ip_address = config.get("Buffers", "receive_buffer_host")
        self._port = port
        if port is None:
            self._port = helpful_functions.read_config_int(
                config, "Buffers", "receive_buffer_port")
        if spike_times is None:
            spike_times = []

        super(SpikeSourceArray, self).__init__(
            n_keys=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=(
                SpikeSourceArray._model_based_max_atoms_per_core),
            board_address=board_address,
            receive_port=None, receive_tag=None,
            virtual_key=None, prefix=None, prefix_type=None, check_keys=False,
            send_buffer_times=spike_times,
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID,
            send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
            send_buffer_space_before_notify=space_before_notification,
            buffer_notification_ip_address=self._ip_address,
            buffer_notification_port=self._port,
            buffer_notification_tag=tag)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder()
        self._spike_recorder_buffer_size = spike_recorder_buffer_size
        self._buffer_size_before_receive = buffer_size_before_receive

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
        self._spike_recording_region_size = None
        self._machine_vertices = list()

        # used for reset and rerun
        self._requires_mapping = True
        self._last_runtime_position = 0

        # default the on-chip memory cap when the caller did not supply one
        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification
        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = \
                MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise exceptions.ConfigurationException(
                "The memory usage on chip is either beyond what is supportable"
                " on the spinnaker board being supported or you have requested"
                " a negative value for a memory usage. Please correct and"
                " try again")

        # the notification threshold cannot exceed the buffer itself
        if (self._max_on_chip_memory_usage_for_spikes <
                self._space_before_notification):
            self._space_before_notification =\
                self._max_on_chip_memory_usage_for_spikes
 def _read_config_int(self, section, item):
     return read_config_int(self._config, section, item)
    def __init__(self,
                 n_neurons,
                 constraints,
                 label,
                 seed,
                 max_atoms_per_core,
                 model,
                 rate=None,
                 start=None,
                 duration=None,
                 rates=None,
                 starts=None,
                 durations=None,
                 max_rate=None):
        # pylint: disable=too-many-arguments
        super(SpikeSourcePoissonVertex, self).__init__(label, constraints,
                                                       max_atoms_per_core)

        # atoms params
        self.__n_atoms = n_neurons
        self.__model_name = "SpikeSourcePoisson"
        self.__model = model
        self.__seed = seed
        self.__kiss_seed = dict()
        self.__rng = None
        self.__n_subvertices = 0
        self.__n_data_specs = 0

        # check for changes parameters
        self.__change_requires_mapping = True
        self.__change_requires_neuron_parameters_reload = False

        self.__spike_recorder = MultiSpikeRecorder()

        # Check for disallowed pairs of parameters
        if (rates is not None) and (rate is not None):
            raise Exception("Exactly one of rate and rates can be specified")
        if (starts is not None) and (start is not None):
            raise Exception("Exactly one of start and starts can be specified")
        if (durations is not None) and (duration is not None):
            raise Exception(
                "Exactly one of duration and durations can be specified")
        if rate is None and rates is None:
            raise Exception("One of rate or rates must be specified")

        # Normalise the parameters
        self.__is_variable_rate = rates is not None
        if rates is None:
            if hasattr(rate, "__len__"):
                # Single rate per neuron for whole simulation
                rates = [numpy.array([r]) for r in rate]
            else:
                # Single rate for all neurons for whole simulation
                rates = numpy.array([rate])
        elif hasattr(rates[0], "__len__"):
            # Convert each list to numpy array
            rates = [numpy.array(r) for r in rates]
        else:
            rates = numpy.array(rates)
        if starts is None and start is not None:
            if hasattr(start, "__len__"):
                starts = [numpy.array([s]) for s in start]
            elif start is None:
                starts = numpy.array([0])
            else:
                starts = numpy.array([start])
        elif starts is not None and hasattr(starts[0], "__len__"):
            starts = [numpy.array(s) for s in starts]
        elif starts is not None:
            starts = numpy.array(starts)
        if durations is None and duration is not None:
            if hasattr(duration, "__len__"):
                durations = [numpy.array([d]) for d in duration]
            else:
                durations = numpy.array([duration])
        elif durations is not None and hasattr(durations[0], "__len__"):
            durations = [numpy.array(d) for d in durations]
        elif durations is not None:
            durations = numpy.array(durations)
        else:
            if hasattr(rates[0], "__len__"):
                durations = [
                    numpy.array([None for r in _rate]) for _rate in rates
                ]
            else:
                durations = numpy.array([None for _rate in rates])

        # Check that there is either one list for all neurons,
        # or one per neuron
        if hasattr(rates[0], "__len__") and len(rates) != n_neurons:
            raise Exception(
                "Must specify one rate for all neurons or one per neuron")
        if (starts is not None and hasattr(starts[0], "__len__")
                and len(starts) != n_neurons):
            raise Exception(
                "Must specify one start for all neurons or one per neuron")
        if (durations is not None and hasattr(durations[0], "__len__")
                and len(durations) != n_neurons):
            raise Exception(
                "Must specify one duration for all neurons or one per neuron")

        # Check that for each rate there is a start and duration if needed
        # TODO: Could be more efficient for case where parameters are not one
        #       per neuron
        for i in range(n_neurons):
            rate_set = rates
            if hasattr(rates[0], "__len__"):
                rate_set = rates[i]
            if not hasattr(rate_set, "__len__"):
                raise Exception("Multiple rates must be a list")
            if starts is None and len(rate_set) > 1:
                raise Exception("When multiple rates are specified,"
                                " each must have a start")
            elif starts is not None:
                start_set = starts
                if hasattr(starts[0], "__len__"):
                    start_set = starts[i]
                if len(start_set) != len(rate_set):
                    raise Exception("Each rate must have a start")
                if any(s is None for s in start_set):
                    raise Exception("Start must not be None")
            if durations is not None:
                duration_set = durations
                if hasattr(durations[0], "__len__"):
                    duration_set = durations[i]
                if len(duration_set) != len(rate_set):
                    raise Exception("Each rate must have its own duration")

        if hasattr(rates[0], "__len__"):
            time_to_spike = [
                numpy.array([0 for _ in range(len(rates[i]))])
                for i in range(len(rates))
            ]
        else:
            time_to_spike = numpy.array([0 for _ in range(len(rates))])

        self.__data = SpynnakerRangeDictionary(n_neurons)
        self.__data["rates"] = SpynnakerRangedList(
            n_neurons,
            rates,
            use_list_as_value=not hasattr(rates[0], "__len__"))
        self.__data["starts"] = SpynnakerRangedList(
            n_neurons,
            starts,
            use_list_as_value=not hasattr(starts[0], "__len__"))
        self.__data["durations"] = SpynnakerRangedList(
            n_neurons,
            durations,
            use_list_as_value=not hasattr(durations[0], "__len__"))
        self.__data["time_to_spike"] = SpynnakerRangedList(
            n_neurons,
            time_to_spike,
            use_list_as_value=not hasattr(time_to_spike[0], "__len__"))
        self.__rng = numpy.random.RandomState(seed)
        self.__rate_change = numpy.zeros(n_neurons)
        self.__machine_time_step = None

        # get config from simulator
        config = globals_variables.get_simulator().config
        self.__n_profile_samples = helpful_functions.read_config_int(
            config, "Reports", "n_profile_samples")

        # Prepare for recording, and to get spikes
        self.__spike_recorder = MultiSpikeRecorder()

        all_rates = list(_flatten(self.__data["rates"]))
        self.__max_rate = max_rate
        if max_rate is None and len(all_rates):
            self.__max_rate = numpy.amax(all_rates)
        elif max_rate is None:
            self.__max_rate = 0

        total_rate = numpy.sum(all_rates)
        self.__max_spikes = 0
        if total_rate > 0:
            # Note we have to do this per rate, as the whole array is not numpy
            max_rates = numpy.array(
                [numpy.max(r) for r in self.__data["rates"]])
            self.__max_spikes = numpy.sum(
                scipy.stats.poisson.ppf(1.0 - (1.0 / max_rates), max_rates))