Example #1
    def build_node(self, model, node):
        """Modify the model to build the Node."""
        f_of_t = node.size_in == 0 and (
            not callable(node.output) or
            getconfig(model.config, node, "function_of_time", False)
        )

        if node.output is None:
            # If the Node is a passthrough Node then create a new filter object
            # for it.  We might be requested to use a fixed number of cores or
            # chips, so we extract that information from the config.
            n_cores = getconfig(model.config, node, "n_cores_per_chip")
            n_chips = getconfig(model.config, node, "n_chips")

            op = Filter(node.size_in, n_cores_per_chip=n_cores,
                        n_chips=n_chips)
            self.passthrough_nodes[node] = op
            model.object_operators[node] = op
        elif f_of_t:
            # If the Node is a function of time then add a new value source for
            # it.  Determine the period by looking in the config; if the output
            # is a constant then the period is dt (i.e., it repeats every
            # timestep).
            if callable(node.output) or isinstance(node.output, Process):
                period = getconfig(model.config, node,
                                   "function_of_time_period")
            else:
                period = model.dt

            vs = ValueSource(node.output, node.size_out, period)
            self._f_of_t_nodes[node] = vs
            model.object_operators[node] = vs
        else:
            with self.host_network:
                self._add_node(node)
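
The branch taken above depends on how the Node is configured. A minimal usage
sketch, assuming the nengo_spinnaker config parameters exercised in Example #8
below (add_spinnaker_params, function_of_time and function_of_time_period; the
exact import path is an assumption):

import nengo
from nengo_spinnaker.config import add_spinnaker_params

with nengo.Network() as net:
    stim = nengo.Node(lambda t: [t, t + 2])  # callable output, size_in == 0

# Mark the Node as a function of time so that build_node precomputes it with
# a ValueSource rather than simulating it on the host.
add_spinnaker_params(net.config)
net.config[stim].function_of_time = True
net.config[stim].function_of_time_period = 1.0  # output repeats after 1s
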
Example #2
    def build_node(self, model, node):
        """Modify the model to build the Node."""
        f_of_t = node.size_in == 0 and (
            not callable(node.output) or
            getconfig(model.config, node, "function_of_time", False)
        )

        if node.output is None:
            # If the Node is a passthrough Node then create a new placeholder
            # for the passthrough node.
            op = PassthroughNode(node.label)
            self.passthrough_nodes[node] = op
            model.object_operators[node] = op
        elif f_of_t:
            # If the Node is a function of time then add a new value source for
            # it.  Determine the period by looking in the config; if the output
            # is a constant then the period is dt (i.e., it repeats every
            # timestep).
            if callable(node.output) or isinstance(node.output, Process):
                period = getconfig(model.config, node,
                                   "function_of_time_period")
            else:
                period = model.dt

            vs = ValueSource(node.output, node.size_out, period)
            self._f_of_t_nodes[node] = vs
            model.object_operators[node] = vs
        else:
            with self.host_network:
                self._add_node(node)
Example #3
    def build_node(self, model, node):
        """Modify the model to build the Node."""
        f_of_t = node.size_in == 0 and (
            not callable(node.output) or
            getconfig(model.config, node, "function_of_time", False)
        )

        if node.output is None:
            # If the Node is a passthrough Node then create a new placeholder
            # for the passthrough node.
            op = PassthroughNode(node.label)
            self.passthrough_nodes[node] = op
            model.object_operators[node] = op
        elif f_of_t:
            # If the Node is a function of time then add a new value source for
            # it.  Determine the period by looking in the config; if the output
            # is a constant then the period is dt (i.e., it repeats every
            # timestep).
            if callable(node.output) or isinstance(node.output, Process):
                period = getconfig(model.config, node,
                                   "function_of_time_period")
            else:
                period = model.dt

            vs = ValueSource(node.output, node.size_out, period)
            self._f_of_t_nodes[node] = vs
            model.object_operators[node] = vs
        else:
            with self.host_network:
                self._add_node(node)
Example #4
    def build_node(self, model, node):
        """Modify the model to build the Node."""
        f_of_t = node.size_in == 0 and (
            not callable(node.output) or
            getconfig(model.config, node, "function_of_time", False)
        )

        if node.output is None:
            # If the Node is a passthrough Node then create a new filter object
            # for it.
            op = Filter(node.size_in)
            self._passthrough_nodes[node] = op
            model.object_operators[node] = op
        elif f_of_t and self.allow_f_of_t_nodes:
            # If the Node is a function of time then add a new value source for
            # it.  Determine the period by looking in the config; if the output
            # is a constant then the period is dt (i.e., it repeats every
            # timestep).
            if not callable(node.output):
                period = model.dt
            else:
                period = getconfig(model.config, node,
                                   "function_of_time_period")

            vs = ValueSource(node.output, node.size_out, period)
            self._f_of_t_nodes[node] = vs
            model.object_operators[node] = vs
        else:
            if f_of_t:
                warnings.warn("{} will not be precomputed as function of time "
                              "Nodes are currently disabled.".format(node))
            with self.host_network:
                self._add_node(node)
Example #5
    def build_node(self, model, node):
        """Modify the model to build the Node."""
        f_of_t = node.size_in == 0 and (
            not callable(node.output) or
            getconfig(model.config, node, "function_of_time", False)
        )

        if node.output is None:
            # If the Node is a passthrough Node then create a new filter object
            # for it.  We might be requested to use a fixed number of cores or
            # chips, so we extract that information from the config.
            n_cores = getconfig(model.config, node, "n_cores_per_chip")
            n_chips = getconfig(model.config, node, "n_chips")

            op = Filter(node.size_in,
                        n_cores_per_chip=n_cores,
                        n_chips=n_chips)
            self.passthrough_nodes[node] = op
            model.object_operators[node] = op
        elif f_of_t:
            # If the Node is a function of time then add a new value source for
            # it.  Determine the period by looking in the config; if the output
            # is a constant then the period is dt (i.e., it repeats every
            # timestep).
            if callable(node.output) or isinstance(node.output, Process):
                period = getconfig(model.config, node,
                                   "function_of_time_period")
            else:
                period = model.dt

            vs = ValueSource(node.output, node.size_out, period)
            self._f_of_t_nodes[node] = vs
            model.object_operators[node] = vs
        else:
            with self.host_network:
                self._add_node(node)
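
Across all five variants above, the f_of_t test encodes the same rule. As a
standalone restatement (a sketch for clarity, not library code):

def is_function_of_time(node, config):
    """A Node can be precomputed iff it takes no input and either has a
    constant (non-callable) output or is explicitly configured as a
    function of time."""
    if node.size_in != 0:
        return False  # Nodes with input cannot be precomputed
    if not callable(node.output):
        return True   # constant output is trivially a function of time
    return getconfig(config, node, "function_of_time", False)
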
Example #6
def optimise_out_passthrough_nodes(model, passthrough_nodes, config,
                                   forced_removals=set()):
    """Remove passthrough Nodes from a network.

    Other Parameters
    ----------------
    forced_removals : {Node, ...}
        Set of Nodes which should be removed regardless of the configuration
        settings.
    """
    for node, operator in iteritems(passthrough_nodes):
        removed = False

        # Determine whether to remove the Node or not (if True then definitely
        # remove; if None then remove only if it doesn't worsen network usage).
        remove_node = (node in forced_removals or
                       getconfig(config, node, "optimize_out"))
        if remove_node or remove_node is None:
            removed = remove_operator_from_connection_map(
                model.connection_map, operator, force=bool(remove_node))

        # Log if the Node was removed
        if removed:
            if node in model.object_operators:
                model.object_operators.pop(node)
            else:
                model.extra_operators.remove(operator)

            logger.info("Passthrough Node {!s} was optimized out".format(node))
Example #7
def optimise_out_passthrough_nodes(model,
                                   passthrough_nodes,
                                   config,
                                   forced_removals=set()):
    """Remove passthrough Nodes from a network.

    Other Parameters
    ----------------
    forced_removals : {Node, ...}
        Set of Nodes which should be removed regardless of the configuration
        settings.
    """
    for node, operator in iteritems(passthrough_nodes):
        removed = False

        # Determine whether to remove the Node or not (if True then definitely
        # remove; if None then remove only if it doesn't worsen network usage).
        remove_node = (node in forced_removals
                       or getconfig(config, node, "optimize_out"))
        if remove_node or remove_node is None:
            removed = remove_operator_from_connection_map(
                model.connection_map, operator, force=bool(remove_node))

        # Log if the Node was removed
        if removed:
            if node in model.object_operators:
                model.object_operators.pop(node)
            else:
                model.extra_operators.remove(operator)

            logger.info("Passthrough Node {!s} was optimized out".format(node))
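
The removal decision in the loop above is effectively tri-state. A minimal
restatement (a sketch; optimize_out stands for the value read from the
config):

def removal_decision(node, forced_removals, optimize_out):
    """Return (attempt_removal, force).

    True forces removal, None attempts it only where it does not worsen
    network usage, and False keeps the Node.
    """
    remove_node = node in forced_removals or optimize_out
    return (remove_node or remove_node is None), bool(remove_node)
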
Example #8
def test_getconfig():
    # Create a network, DON'T add nengo_spinnaker configuration
    with nengo.Network() as net:
        n = nengo.Node(lambda t: [t, t+2])

    # Use getconfig to read configuration options that don't yet exist
    placer = mock.Mock()
    assert getconfig(net.config, Simulator, "placer", placer) is placer
    assert not getconfig(net.config, n, "function_of_time", False)

    # Now add the config
    add_spinnaker_params(net.config)
    net.config[n].function_of_time = True

    # Use getconfig to get configuration options from the config
    assert getconfig(net.config, Simulator, "placer", placer) is not placer
    assert getconfig(net.config, n, "function_of_time", False)
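
A plausible minimal implementation of getconfig consistent with this test (a
sketch only; it assumes lookups on unconfigured objects raise ConfigError or
KeyError, and the library's real code may differ):

from nengo.exceptions import ConfigError

def getconfig(config, obj, name, default=None):
    """Read a nengo_spinnaker parameter, falling back to the default when
    the parameter (or the object's extra configuration) does not exist."""
    try:
        return getattr(config[obj], name, default)
    except (ConfigError, KeyError):
        return default
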
Example #9
    def make_vertices(self, model, n_steps):
        """Construct the data which can be loaded into the memory of a
        SpiNNaker machine.
        """
        # Build encoders, gain and bias regions
        params = model.params[self.ensemble]
        ens_regions = dict()

        # Convert the encoders combined with the gain to S1615 before creating
        # the region.
        encoders_with_gain = params.scaled_encoders
        ens_regions[EnsembleRegions.encoders] = regions.MatrixRegion(
            tp.np_to_fix(encoders_with_gain),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Combine the direct input with the bias before converting to S1615 and
        # creating the region.
        bias_with_di = params.bias + np.dot(encoders_with_gain,
                                            self.direct_input)
        assert bias_with_di.ndim == 1
        ens_regions[EnsembleRegions.bias] = regions.MatrixRegion(
            tp.np_to_fix(bias_with_di),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Convert the gains to S1615 before creating the region
        ens_regions[EnsembleRegions.gain] = regions.MatrixRegion(
            tp.np_to_fix(params.gain),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Extract all the filters from the incoming connections
        incoming = model.get_signals_to_object(self)

        (ens_regions[EnsembleRegions.input_filters],
         ens_regions[EnsembleRegions.input_routing]) = make_filter_regions(
            incoming[InputPort.standard], model.dt, True,
            model.keyspaces.filter_routing_tag,
            width=self.ensemble.size_in
        )
        (ens_regions[EnsembleRegions.inhibition_filters],
         ens_regions[EnsembleRegions.inhibition_routing]) = \
            make_filter_regions(
                incoming[EnsembleInputPort.global_inhibition], model.dt, True,
                model.keyspaces.filter_routing_tag, width=1
            )

        # Extract all the decoders for the outgoing connections and build the
        # regions for the decoders and the regions for the output keys.
        outgoing = model.get_signals_from_object(self)
        if OutputPort.standard in outgoing:
            decoders, output_keys = \
                get_decoders_and_keys(outgoing[OutputPort.standard], True)
        else:
            decoders = np.array([])
            output_keys = list()
        size_out = decoders.shape[0]

        ens_regions[EnsembleRegions.decoders] = regions.MatrixRegion(
            tp.np_to_fix(decoders / model.dt),
            sliced_dimension=regions.MatrixPartitioning.rows)
        ens_regions[EnsembleRegions.keys] = regions.KeyspacesRegion(
            output_keys,
            fields=[regions.KeyField({'cluster': 'cluster'})],
            partitioned_by_atom=True
        )

        # The population length region stores information about groups of
        # co-operating cores.
        ens_regions[EnsembleRegions.population_length] = \
            regions.ListRegion("I")

        # The ensemble region contains basic information about the ensemble
        ens_regions[EnsembleRegions.ensemble] = EnsembleRegion(
            model.machine_timestep, self.ensemble.size_in)

        # The neuron region contains information specific to the neuron type
        ens_regions[EnsembleRegions.neuron] = LIFRegion(
            model.dt, self.ensemble.neuron_type.tau_rc,
            self.ensemble.neuron_type.tau_ref
        )

        # Manage profiling
        n_profiler_samples = 0
        self.profiled = getconfig(model.config, self.ensemble, "profile",
                                  False)
        if self.profiled:
            # Try and get number of samples from config
            n_profiler_samples = getconfig(model.config, self.ensemble,
                                           "profile_num_samples")

            # If it's not specified, calculate sensible default
            if n_profiler_samples is None:
                n_profiler_samples = (len(EnsembleSlice.profiler_tag_names) *
                                      n_steps * 2)

        # Create profiler region
        ens_regions[EnsembleRegions.profiler] = regions.Profiler(
            n_profiler_samples)
        ens_regions[EnsembleRegions.ensemble].n_profiler_samples = \
            n_profiler_samples

        # Manage probes
        for probe in self.local_probes:
            if probe.attr in ("output", "spikes"):
                self.record_spikes = True
            elif probe.attr == "voltage":
                self.record_voltages = True
            else:
                raise NotImplementedError(
                    "Cannot probe {} on Ensembles".format(probe.attr)
                )

        # Set the flags
        ens_regions[EnsembleRegions.ensemble].record_spikes = \
            self.record_spikes
        ens_regions[EnsembleRegions.ensemble].record_voltages = \
            self.record_voltages

        # Create the probe recording regions
        ens_regions[EnsembleRegions.spikes] = regions.SpikeRecordingRegion(
            n_steps if self.record_spikes else 0)
        ens_regions[EnsembleRegions.voltages] = regions.VoltageRecordingRegion(
            n_steps if self.record_voltages else 0)

        # Create constraints against which to partition, initially assume that
        # we can devote 16 cores to every problem.
        sdram_constraint = partition.Constraint(128 * 2**20,
                                                0.9)  # 90% of 128MiB
        dtcm_constraint = partition.Constraint(16 * 64 * 2**10,
                                               0.9)  # 90% of 16 cores DTCM

        # The number of cycles available is the 200MHz clock rate multiplied
        # by the machine timestep, i.e. 200 cycles per microsecond of timestep.
        cycles = 200 * model.machine_timestep
        cpu_constraint = partition.Constraint(cycles * 16,
                                              0.8)  # 80% of 16 cores compute

        # Form the constraints dictionary
        def _make_constraint(f, size_in, size_out, **kwargs):
            """Wrap a usage computation method to work with the partitioner."""
            def f_(vertex_slice):
                # Calculate the number of neurons
                n_neurons = vertex_slice.stop - vertex_slice.start

                # Call the original method
                return f(size_in, size_out, n_neurons, **kwargs)
            return f_

        partition_constraints = {
            sdram_constraint: _make_constraint(_lif_sdram_usage,
                                               self.ensemble.size_in,
                                               size_out),
            dtcm_constraint: _make_constraint(_lif_dtcm_usage,
                                              self.ensemble.size_in, size_out),
            cpu_constraint: _make_constraint(_lif_cpu_usage,
                                             self.ensemble.size_in, size_out),
        }

        # Partition the ensemble to create clusters of co-operating cores
        self.clusters = list()
        vertices = list()
        constraints = list()
        for sl in partition.partition(slice(0, self.ensemble.n_neurons),
                                      partition_constraints):
            # For each slice we create a cluster of co-operating cores.  We
            # instantiate the cluster and then ask it to produce vertices which
            # will be added to the netlist.
            cluster = EnsembleCluster(sl, self.ensemble.size_in, size_out,
                                      ens_regions)
            self.clusters.append(cluster)

            # Get the vertices for the cluster
            cluster_vertices = cluster.make_vertices(cycles)
            vertices.extend(cluster_vertices)

            # Create a constraint which forces these vertices to be present on
            # the same chip
            constraints.append(SameChipConstraint(cluster_vertices))

        # Return the vertices and callback methods
        return netlistspec(vertices, self.load_to_machine,
                           after_simulation_function=self.after_simulation,
                           constraints=constraints)
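
Every tp.np_to_fix call above targets the S16.15 fixed-point format used on
SpiNNaker. A minimal sketch of such a conversion (assuming round-to-nearest
into int32 with 15 fractional bits; the real helper may treat out-of-range
values differently):

import numpy as np

def np_to_fix(values):
    """Convert floats to S16.15 fixed point: int32 with 15 fractional bits."""
    return np.round(np.asarray(values) * (1 << 15)).astype(np.int32)

print(np_to_fix([1.0, -0.5]))  # -> [ 32768 -16384]
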
Example #10
    def make_vertices(self, model, n_steps):
        """Construct the data which can be loaded into the memory of a
        SpiNNaker machine.
        """
        # Build encoders, gain and bias regions
        params = model.params[self.ensemble]
        ens_regions = dict()

        # Convert the encoders combined with the gain to S1615 before creating
        # the region.
        encoders_with_gain = params.scaled_encoders
        ens_regions[EnsembleRegions.encoders] = regions.MatrixRegion(
            tp.np_to_fix(encoders_with_gain),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Combine the direct input with the bias before converting to S1615 and
        # creating the region.
        bias_with_di = params.bias + np.dot(encoders_with_gain,
                                            self.direct_input)
        assert bias_with_di.ndim == 1
        ens_regions[EnsembleRegions.bias] = regions.MatrixRegion(
            tp.np_to_fix(bias_with_di),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Convert the gains to S1615 before creating the region
        ens_regions[EnsembleRegions.gain] = regions.MatrixRegion(
            tp.np_to_fix(params.gain),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Extract all the filters from the incoming connections
        incoming = model.get_signals_to_object(self)

        (ens_regions[EnsembleRegions.input_filters],
         ens_regions[EnsembleRegions.input_routing]) = make_filter_regions(
             incoming[InputPort.standard],
             model.dt,
             True,
             model.keyspaces.filter_routing_tag,
             width=self.ensemble.size_in)
        (ens_regions[EnsembleRegions.inhibition_filters],
         ens_regions[EnsembleRegions.inhibition_routing]) = \
            make_filter_regions(
                incoming[EnsembleInputPort.global_inhibition], model.dt, True,
                model.keyspaces.filter_routing_tag, width=1
            )

        # Extract all the decoders for the outgoing connections and build the
        # regions for the decoders and the regions for the output keys.
        outgoing = model.get_signals_from_object(self)
        if OutputPort.standard in outgoing:
            decoders, output_keys = \
                get_decoders_and_keys(outgoing[OutputPort.standard], True)
        else:
            decoders = np.array([])
            output_keys = list()
        size_out = decoders.shape[0]

        ens_regions[EnsembleRegions.decoders] = regions.MatrixRegion(
            tp.np_to_fix(decoders / model.dt),
            sliced_dimension=regions.MatrixPartitioning.rows)
        ens_regions[EnsembleRegions.keys] = regions.KeyspacesRegion(
            output_keys,
            fields=[regions.KeyField({'cluster': 'cluster'})],
            partitioned_by_atom=True)

        # The population length region stores information about groups of
        # co-operating cores.
        ens_regions[EnsembleRegions.population_length] = \
            regions.ListRegion("I")

        # The ensemble region contains basic information about the ensemble
        ens_regions[EnsembleRegions.ensemble] = EnsembleRegion(
            model.machine_timestep, self.ensemble.size_in)

        # The neuron region contains information specific to the neuron type
        ens_regions[EnsembleRegions.neuron] = LIFRegion(
            model.dt, self.ensemble.neuron_type.tau_rc,
            self.ensemble.neuron_type.tau_ref)

        # Manage profiling
        n_profiler_samples = 0
        self.profiled = getconfig(model.config, self.ensemble, "profile",
                                  False)
        if self.profiled:
            # Try and get number of samples from config
            n_profiler_samples = getconfig(model.config, self.ensemble,
                                           "profile_num_samples")

            # If it's not specified, calculate sensible default
            if n_profiler_samples is None:
                n_profiler_samples = (len(EnsembleSlice.profiler_tag_names) *
                                      n_steps * 2)

        # Create profiler region
        ens_regions[EnsembleRegions.profiler] = regions.Profiler(
            n_profiler_samples)
        ens_regions[EnsembleRegions.ensemble].n_profiler_samples = \
            n_profiler_samples

        # Manage probes
        for probe in self.local_probes:
            if probe.attr in ("output", "spikes"):
                self.record_spikes = True
            elif probe.attr == "voltage":
                self.record_voltages = True
            else:
                raise NotImplementedError(
                    "Cannot probe {} on Ensembles".format(probe.attr))

        # Set the flags
        ens_regions[EnsembleRegions.ensemble].record_spikes = \
            self.record_spikes
        ens_regions[EnsembleRegions.ensemble].record_voltages = \
            self.record_voltages

        # Create the probe recording regions
        ens_regions[EnsembleRegions.spikes] = regions.SpikeRecordingRegion(
            n_steps if self.record_spikes else 0)
        ens_regions[EnsembleRegions.voltages] = regions.VoltageRecordingRegion(
            n_steps if self.record_voltages else 0)

        # Create constraints against which to partition, initially assume that
        # we can devote 16 cores to every problem.
        sdram_constraint = partition.Constraint(128 * 2**20,
                                                0.9)  # 90% of 128MiB
        dtcm_constraint = partition.Constraint(16 * 64 * 2**10,
                                               0.9)  # 90% of 16 cores DTCM

        # The number of cycles available is the 200MHz clock rate multiplied
        # by the machine timestep, i.e. 200 cycles per microsecond of timestep.
        cycles = 200 * model.machine_timestep
        cpu_constraint = partition.Constraint(cycles * 16,
                                              0.8)  # 80% of 16 cores compute

        # Form the constraints dictionary
        def _make_constraint(f, size_in, size_out, **kwargs):
            """Wrap a usage computation method to work with the partitioner."""
            def f_(vertex_slice):
                # Calculate the number of neurons
                n_neurons = vertex_slice.stop - vertex_slice.start

                # Call the original method
                return f(size_in, size_out, n_neurons, **kwargs)

            return f_

        partition_constraints = {
            sdram_constraint:
            _make_constraint(_lif_sdram_usage, self.ensemble.size_in,
                             size_out),
            dtcm_constraint:
            _make_constraint(_lif_dtcm_usage, self.ensemble.size_in, size_out),
            cpu_constraint:
            _make_constraint(_lif_cpu_usage, self.ensemble.size_in, size_out),
        }

        # Partition the ensemble to create clusters of co-operating cores
        self.clusters = list()
        vertices = list()
        constraints = list()
        for sl in partition.partition(slice(0, self.ensemble.n_neurons),
                                      partition_constraints):
            # For each slice we create a cluster of co-operating cores.  We
            # instantiate the cluster and then ask it to produce vertices which
            # will be added to the netlist.
            cluster = EnsembleCluster(sl, self.ensemble.size_in, size_out,
                                      ens_regions)
            self.clusters.append(cluster)

            # Get the vertices for the cluster
            cluster_vertices = cluster.make_vertices(cycles)
            vertices.extend(cluster_vertices)

            # Create a constraint which forces these vertices to be present on
            # the same chip
            constraints.append(SameChipConstraint(cluster_vertices))

        # Return the vertices and callback methods
        return netlistspec(vertices,
                           self.load_to_machine,
                           after_simulation_function=self.after_simulation,
                           constraints=constraints)
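
The CPU constraint above is just clock rate multiplied by timestep: at 200MHz
there are 200 cycles per microsecond, so an assumed (and typical) 1000us
machine timestep gives each core a budget of 200,000 cycles per step:

machine_timestep = 1000          # microseconds (assumed typical value)
cycles = 200 * machine_timestep  # 200,000 cycles per core per step
budget = cycles * 16 * 0.8       # 80% of 16 cores' compute: 2,560,000 cycles
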
Example #11
    def make_vertices(self, model, n_steps):  # TODO remove n_steps
        """Construct the data which can be loaded into the memory of a
        SpiNNaker machine.
        """
        # Build encoders, gain and bias regions
        params = model.params[self.ensemble]

        # Convert the encoders combined with the gain to S1615 before creating
        # the region.
        encoders_with_gain = params.scaled_encoders
        self.encoders_region = regions.MatrixRegion(
            tp.np_to_fix(encoders_with_gain),
            sliced_dimension=regions.MatrixPartitioning.rows
        )

        # Combine the direct input with the bias before converting to S1615 and
        # creating the region.
        bias_with_di = params.bias + np.dot(encoders_with_gain,
                                            self.direct_input)
        assert bias_with_di.ndim == 1
        self.bias_region = regions.MatrixRegion(
            tp.np_to_fix(bias_with_di),
            sliced_dimension=regions.MatrixPartitioning.rows
        )

        # Convert the gains to S1615 before creating the region
        self.gain_region = regions.MatrixRegion(
            tp.np_to_fix(params.gain),
            sliced_dimension=regions.MatrixPartitioning.rows
        )

        # Extract all the filters from the incoming connections
        incoming = model.get_signals_connections_to_object(self)

        self.input_filters, self.input_filter_routing = make_filter_regions(
            incoming[InputPort.standard], model.dt, True,
            model.keyspaces.filter_routing_tag, width=self.ensemble.size_in
        )
        self.inhib_filters, self.inhib_filter_routing = make_filter_regions(
            incoming[EnsembleInputPort.global_inhibition], model.dt, True,
            model.keyspaces.filter_routing_tag, width=1
        )
        self.mod_filters, self.mod_filter_routing = make_filter_regions(
            {}, model.dt, True, model.keyspaces.filter_routing_tag
        )

        # Extract all the decoders for the outgoing connections and build the
        # regions for the decoders and the regions for the output keys.
        outgoing = model.get_signals_connections_from_object(self)
        decoders, output_keys = \
            get_decoders_and_keys(model, outgoing[OutputPort.standard], True)
        size_out = decoders.shape[1]

        # TODO: Include learnt decoders
        self.pes_region = PESRegion()

        self.decoders_region = regions.MatrixRegion(
            tp.np_to_fix(decoders / model.dt),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        self.output_keys_region = regions.KeyspacesRegion(
            output_keys, fields=[regions.KeyField({'cluster': 'cluster'})]
        )

        # Create the recording regions for locally situated probes
        self.spike_region = None
        self.probe_spikes = False
        self.voltage_region = None
        self.probe_voltages = False

        for probe in self.local_probes:
            # For each probe determine which regions and flags should be set
            if probe.attr in ("output", "spikes"):
                # If spikes are being probed then ensure that the flag is set
                # and a region exists.
                if not self.probe_spikes:
                    self.spike_region = SpikeRegion(n_steps)
                    self.probe_spikes = True
            elif probe.attr in ("voltage"):
                # If voltages are being probed then ensure that the flag is set
                # and a region exists.
                if not self.probe_voltages:
                    self.voltage_region = VoltageRegion(n_steps)
                    self.probe_voltages = True

        # If profiling is enabled
        num_profiler_samples = 0
        if getconfig(model.config, self.ensemble, "profile", False):
            # Try and get number of samples from config
            num_profiler_samples = getconfig(model.config, self.ensemble,
                                             "profile_num_samples")

            # If it's not specified, calculate sensible default
            if num_profiler_samples is None:
                num_profiler_samples =\
                    len(EnsembleLIF.profiler_tag_names) * n_steps * 2

        # Create profiler region
        self.profiler_region = regions.Profiler(num_profiler_samples)

        # Create the regions list
        self.regions = [
            SystemRegion(self.ensemble.size_in,
                         size_out,
                         model.machine_timestep,
                         self.ensemble.neuron_type.tau_ref,
                         self.ensemble.neuron_type.tau_rc,
                         model.dt,
                         self.probe_spikes,
                         self.probe_voltages,
                         num_profiler_samples
                         ),
            self.bias_region,
            self.encoders_region,
            self.decoders_region,
            self.output_keys_region,
            self.input_filters,
            self.input_filter_routing,
            self.inhib_filters,
            self.inhib_filter_routing,
            self.gain_region,
            self.mod_filters,
            self.mod_filter_routing,
            self.pes_region,
            self.profiler_region,
            self.spike_region,
            self.voltage_region,
        ]

        # Partition the ensemble and get a list of vertices to load to the
        # machine.  We can expect to be DTCM or CPU bound, so the SDRAM bound
        # can be quite lax to allow for lots of data probing.
        # TODO: Include other DTCM usage
        def cpu_usage(sl):
            """Calculate the CPU usage (in cycles) based on the number of
            neurons and the size_in and size_out of the ensemble.

            The equation and coefficients are taken from: "An Efficient
            SpiNNaker Implementation of the NEF", Mundy, Knight, Stewart and
            Furber [IJCNN 2015]
            """
            n_neurons = (sl.stop - sl.start)
            return (245 + 43*self.ensemble.size_in + 100 + 702*size_out +
                    188 + 69*n_neurons + 13*n_neurons*self.ensemble.size_in)

        self.vertices = list()
        sdram_constraint = partition.Constraint(8*2**20)  # Max 8MiB
        dtcm_constraint = partition.Constraint(64*2**10, .75)  # 75% of 64KiB
        cpu_constraint = partition.Constraint(200000, .8)  # 80% of 200k cycles
        constraints = {
            sdram_constraint: lambda s: regions.utils.sizeof_regions(
                self.regions, s),
            # **HACK** don't include the last three regions (profiler, spike
            # and voltage recording) in the DTCM estimate
            dtcm_constraint: lambda s: regions.utils.sizeof_regions(
                self.regions[:-3], s) + 5*(s.stop - s.start),
            cpu_constraint: cpu_usage,
        }
        app_name = (
            "ensemble_profiled" if num_profiler_samples > 0
            else "ensemble"
        )
        for sl in partition.partition(slice(0, self.ensemble.n_neurons),
                                      constraints):
            resources = {
                Cores: 1,
                SDRAM: regions.utils.sizeof_regions(self.regions, sl),
            }
            vsl = VertexSlice(sl, get_application(app_name), resources)
            self.vertices.append(vsl)

        # Return the vertices and callback methods
        return netlistspec(self.vertices, self.load_to_machine,
                           after_simulation_function=self.after_simulation)
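
To make the coefficients concrete, here is a worked evaluation of the
cpu_usage estimate above for an assumed slice of 100 neurons from an ensemble
with size_in == size_out == 2:

size_in, size_out, n_neurons = 2, 2, 100
cycles = (245 + 43 * size_in + 100 + 702 * size_out +
          188 + 69 * n_neurons + 13 * n_neurons * size_in)
print(cycles)  # 11523 -- well within the 160,000-cycle budget (80% of 200k)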