Example No. 1
    def make_vertices(self, model, n_steps):
        """Create the vertices to be simulated on the machine."""
        # Create the system region
        self.system_region = SystemRegion(model.machine_timestep,
                                          self.period is not None, n_steps)

        # Get all the outgoing signals to determine the size of the output
        # and to build a list of keys.
        sigs_conns = model.get_signals_from_object(self)
        if len(sigs_conns) == 0:
            return netlistspec([])

        keys = list()
        self.transmission_parameters = list()
        for sig, transmission_params in sigs_conns[OutputPort.standard]:
            # Add the keys for this connection
            transform, sig_keys = get_transform_keys(sig, transmission_params)
            keys.extend(sig_keys)
            self.transmission_parameters.append((transmission_params,
                                                 transform))
        size_out = len(keys)

        # Build the keys region
        self.keys_region = regions.KeyspacesRegion(
            keys, [regions.KeyField({"cluster": "cluster"})],
            partitioned_by_atom=True
        )

        # Create the output region
        self.output_region = regions.MatrixRegion(
            np.zeros((n_steps, size_out)),
            sliced_dimension=regions.MatrixPartitioning.columns
        )

        self.regions = [self.system_region, self.keys_region,
                        self.output_region]

        # Partition by output dimension to create vertices
        transmit_constraint = partition.Constraint(10)
        sdram_constraint = partition.Constraint(8*2**20)  # Max 8MiB
        constraints = {
            transmit_constraint: lambda s: s.stop - s.start,
            sdram_constraint: (
                lambda s: regions.utils.sizeof_regions(self.regions, s)),
        }
        for sl in partition.partition(slice(0, size_out), constraints):
            # Determine the resources
            resources = {
                Cores: 1,
                SDRAM: regions.utils.sizeof_regions(self.regions, sl),
            }
            vsl = VertexSlice(sl, self._label, get_application("value_source"),
                              resources)
            self.vertices.append(vsl)

        # Return the vertices and callback methods
        return netlistspec(self.vertices, self.load_to_machine,
                           self.before_simulation)
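
The pattern at the heart of this example is the constraint dictionary: each
partition.Constraint maps to a callable that reports resource usage for a
candidate slice, and partition.partition keeps subdividing until every slice
fits. A minimal, hypothetical illustration (the exact slice boundaries depend
on the library's chunking strategy):

# Hypothetical illustration of the constraint-dict idiom used above
max_rows = partition.Constraint(10)  # at most 10 atoms per vertex
usage = {max_rows: lambda sl: sl.stop - sl.start}

slices = list(partition.partition(slice(0, 25), usage))
# Expect three slices of <= 10 atoms each,
# e.g. [slice(0, 9), slice(9, 18), slice(18, 25)]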
Example No. 2
def test_constraint():
    """Test creating a constraint."""
    # Constraints consist of a limit and an optional target.
    constraint = pac.Constraint(100)
    assert constraint.maximum == 100
    assert constraint.target == 1.0
    assert constraint.max_usage == 100.0

    constraint = pac.Constraint(100, 0.9)
    assert constraint.maximum == 100
    assert constraint.target == 0.9
    assert constraint.max_usage == 90.0
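
The assertions above pin down the Constraint interface completely: a hard
maximum, an optional target fraction, and their product as the effective
limit. A minimal sketch consistent with the test (an assumption; the real
nengo_spinnaker class may carry more behaviour):

class Constraint(object):
    """Minimal sketch implied by the test above, not the library's code."""

    def __init__(self, maximum, target=1.0):
        self.maximum = maximum              # hard resource limit
        self.target = target                # fraction of the limit to use
        self.max_usage = maximum * target   # effective limit when partitioning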
Example No. 3
    def test_single_partition_step(self):
        # Create the constraint
        constraint_a = pac.Constraint(100, .7)
        constraint_b = pac.Constraint(50)

        # Create the constraint -> usage mapping
        constraints = {
            constraint_a: lambda sl: sl.stop - sl.start + 10,
            constraint_b: lambda sl: sl.stop - sl.start
        }

        # Perform the partitioning
        assert list(pac.partition(
            slice(100), constraints)) == [slice(0, 50),
                                          slice(50, 100)]
Example No. 4
    def test_unpartitionable(self):
        # Create the constraint
        constraint_a = pac.Constraint(50)

        # Create the constraint -> usage mapping
        constraints = {constraint_a: lambda sl: sl.stop - sl.start + 100}

        # Perform the partitioning
        with pytest.raises(pac.UnpartitionableError):
            list(pac.partition(slice(100), constraints))
Example No. 5
    def test_no_partitioning(self):
        # Create the constraint
        constraint = pac.Constraint(100, 0.9)

        # Create the constraint -> usage mapping
        constraints = {constraint: lambda sl: sl.stop - sl.start + 10}

        # Perform the partitioning
        assert list(pac.partition(slice(0, 80), constraints)) == [slice(0, 80)]
        assert list(pac.partition(slice(80), constraints)) == [slice(0, 80)]
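
Taken together, Examples No. 3-5 pin down the contract of pac.partition:
split the slice into the smallest number of roughly equal pieces such that
every piece satisfies every constraint's max_usage, and raise
UnpartitionableError if even single-atom pieces do not fit. A self-contained
sketch consistent with these tests (helper names and chunk sizing are
assumptions, not the library's real implementation):

class UnpartitionableError(Exception):
    pass

def divide_slice(initial, n_pieces):
    """Split a slice into at most n_pieces contiguous chunks."""
    start, stop = initial.start or 0, initial.stop
    chunk = -(-(stop - start) // n_pieces)  # ceiling division
    while start < stop:
        yield slice(start, min(start + chunk, stop))
        start += chunk

def partition(initial, constraints):
    """Return the fewest chunks such that every constraint is satisfied."""
    n_atoms = initial.stop - (initial.start or 0)
    for n_pieces in range(1, n_atoms + 1):
        chunks = list(divide_slice(initial, n_pieces))
        if all(usage(sl) <= c.max_usage
               for c, usage in constraints.items() for sl in chunks):
            return chunks
    raise UnpartitionableError("even single atoms exceed a constraint")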
Example No. 6
    def test_just_partitionable(self):
        # Create the constraint
        constraint_a = pac.Constraint(50)

        # Create the constraint -> usage mapping
        constraints = {constraint_a: lambda sl: sl.stop - sl.start + 49}

        # Perform the partitioning
        assert (list(pac.partition(slice(100), constraints)) == [
            slice(n, n + 1) for n in range(100)
        ])  # pragma: no cover
Example No. 7
    def test_unpartitionable(self):
        # Create the constraint
        constraint = pac.Constraint(50)

        # Create the constraint -> usage mapping
        def cons(*slices):
            return sum(sl.stop - sl.start for sl in slices) + 50

        constraints = {constraint: cons}

        # Perform the partitioning
        with pytest.raises(pac.UnpartitionableError):
            list(pac.partition_multiple((slice(10), slice(2)), constraints))
Example No. 8
    def test_single_partition_step(self):
        # Create the constraint
        constraint = pac.Constraint(50)

        # Create the constraint -> usage mapping
        def cons(*slices):
            return sum(sl.stop - sl.start for sl in slices)

        constraints = {constraint: cons}

        # Perform the partitioning
        assert (list(
            pac.partition_multiple(
                (slice(80), slice(20)),
                constraints)) == [(slice(0, 40), slice(0, 10)),
                                  (slice(40, 80), slice(10, 20))])
Example No. 9
    def test_no_partitioning(self):
        # Create the constraint
        constraint = pac.Constraint(100, 0.9)

        # Create the constraint -> usage mapping
        def cons(*slices):
            return sum(sl.stop - sl.start for sl in slices) + 10

        constraints = {constraint: cons}

        # Perform the partitioning
        expected = [(slice(0, 40), slice(0, 30))]
        assert list(pac.partition_multiple(
            (slice(0, 40), slice(0, 30)), constraints)) == expected
        assert list(pac.partition_multiple(
            (slice(40), slice(30)), constraints)) == expected
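
Examples No. 7-9 exercise the multi-slice variant: the slices are split in
lockstep (the same number of pieces each) and every usage callable receives
one piece of each slice per step. A sketch consistent with these tests,
reusing divide_slice and UnpartitionableError from the sketch after Example
No. 5:

def partition_multiple(slices, constraints):
    """Split several slices in lockstep until every group of pieces fits."""
    n_atoms = max(sl.stop - (sl.start or 0) for sl in slices)
    for n_pieces in range(1, n_atoms + 1):
        # One tuple per step, holding the i-th piece of every slice
        groups = list(zip(*(list(divide_slice(sl, n_pieces))
                            for sl in slices)))
        if len(groups) == n_pieces and all(
                usage(*group) <= c.max_usage
                for c, usage in constraints.items() for group in groups):
            return groups
    raise UnpartitionableError("slices cannot be split finely enough")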
Example No. 10
    def make_vertices(self, model, n_steps):
        """Construct the data which can be loaded into the memory of a
        SpiNNaker machine.
        """
        # Build encoders, gain and bias regions
        params = model.params[self.ensemble]
        ens_regions = dict()

        # Convert the encoders combined with the gain to S1615 before creating
        # the region.
        encoders_with_gain = params.scaled_encoders
        ens_regions[EnsembleRegions.encoders] = regions.MatrixRegion(
            tp.np_to_fix(encoders_with_gain),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Combine the direct input with the bias before converting to S1615 and
        # creating the region.
        bias_with_di = params.bias + np.dot(encoders_with_gain,
                                            self.direct_input)
        assert bias_with_di.ndim == 1
        ens_regions[EnsembleRegions.bias] = regions.MatrixRegion(
            tp.np_to_fix(bias_with_di),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Convert the gains to S1615 before creating the region
        ens_regions[EnsembleRegions.gain] = regions.MatrixRegion(
            tp.np_to_fix(params.gain),
            sliced_dimension=regions.MatrixPartitioning.rows)

        # Extract all the filters from the incoming connections
        incoming = model.get_signals_to_object(self)

        (ens_regions[EnsembleRegions.input_filters],
         ens_regions[EnsembleRegions.input_routing]) = make_filter_regions(
             incoming[InputPort.standard],
             model.dt,
             True,
             model.keyspaces.filter_routing_tag,
             width=self.ensemble.size_in)
        (ens_regions[EnsembleRegions.inhibition_filters],
         ens_regions[EnsembleRegions.inhibition_routing]) = \
            make_filter_regions(
                incoming[EnsembleInputPort.global_inhibition], model.dt, True,
                model.keyspaces.filter_routing_tag, width=1
            )

        # Extract all the decoders for the outgoing connections and build the
        # regions for the decoders and the regions for the output keys.
        outgoing = model.get_signals_from_object(self)
        if OutputPort.standard in outgoing:
            decoders, output_keys = \
                get_decoders_and_keys(outgoing[OutputPort.standard], True)
        else:
            decoders = np.array([])
            output_keys = list()
        size_out = decoders.shape[0]

        ens_regions[EnsembleRegions.decoders] = regions.MatrixRegion(
            tp.np_to_fix(decoders / model.dt),
            sliced_dimension=regions.MatrixPartitioning.rows)
        ens_regions[EnsembleRegions.keys] = regions.KeyspacesRegion(
            output_keys,
            fields=[regions.KeyField({'cluster': 'cluster'})],
            partitioned_by_atom=True)

        # The population length region stores information about groups of
        # co-operating cores.
        ens_regions[EnsembleRegions.population_length] = \
            regions.ListRegion("I")

        # The ensemble region contains basic information about the ensemble
        ens_regions[EnsembleRegions.ensemble] = EnsembleRegion(
            model.machine_timestep, self.ensemble.size_in)

        # The neuron region contains information specific to the neuron type
        ens_regions[EnsembleRegions.neuron] = LIFRegion(
            model.dt, self.ensemble.neuron_type.tau_rc,
            self.ensemble.neuron_type.tau_ref)

        # Manage profiling
        n_profiler_samples = 0
        self.profiled = getconfig(model.config, self.ensemble, "profile",
                                  False)
        if self.profiled:
            # Try to get the number of samples from the config
            n_profiler_samples = getconfig(model.config, self.ensemble,
                                           "profile_num_samples")

            # If it's not specified, calculate a sensible default
            if n_profiler_samples is None:
                n_profiler_samples = (len(EnsembleSlice.profiler_tag_names) *
                                      n_steps * 2)

        # Create profiler region
        ens_regions[EnsembleRegions.profiler] = regions.Profiler(
            n_profiler_samples)
        ens_regions[EnsembleRegions.ensemble].n_profiler_samples = \
            n_profiler_samples

        # Manage probes
        for probe in self.local_probes:
            if probe.attr in ("output", "spikes"):
                self.record_spikes = True
            elif probe.attr == "voltage":
                self.record_voltages = True
            else:
                raise NotImplementedError(
                    "Cannot probe {} on Ensembles".format(probe.attr))

        # Set the flags
        ens_regions[EnsembleRegions.ensemble].record_spikes = \
            self.record_spikes
        ens_regions[EnsembleRegions.ensemble].record_voltages = \
            self.record_voltages

        # Create the probe recording regions
        ens_regions[EnsembleRegions.spikes] = regions.SpikeRecordingRegion(
            n_steps if self.record_spikes else 0)
        ens_regions[EnsembleRegions.voltages] = regions.VoltageRecordingRegion(
            n_steps if self.record_voltages else 0)

        # Create constraints against which to partition; initially assume
        # that we can devote 16 cores to every problem.
        sdram_constraint = partition.Constraint(128 * 2**20,
                                                0.9)  # 90% of 128MiB
        dtcm_constraint = partition.Constraint(16 * 64 * 2**10,
                                               0.9)  # 90% of 16 cores DTCM

        # The number of cycles available is the 200MHz clock rate multiplied
        # by the machine timestep; i.e., 200 cycles per microsecond times the
        # machine timestep in microseconds.
        cycles = 200 * model.machine_timestep
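        # e.g. a 1ms (1000us) machine timestep gives 200 * 1000 = 200,000
        # cycles available per core per timestep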
        cpu_constraint = partition.Constraint(cycles * 16,
                                              0.8)  # 80% of 16 cores compute

        # Form the constraints dictionary
        def _make_constraint(f, size_in, size_out, **kwargs):
            """Wrap a usage computation method to work with the partitioner."""
            def f_(vertex_slice):
                # Calculate the number of neurons
                n_neurons = vertex_slice.stop - vertex_slice.start

                # Call the original method
                return f(size_in, size_out, n_neurons, **kwargs)

            return f_

        partition_constraints = {
            sdram_constraint:
            _make_constraint(_lif_sdram_usage, self.ensemble.size_in,
                             size_out),
            dtcm_constraint:
            _make_constraint(_lif_dtcm_usage, self.ensemble.size_in, size_out),
            cpu_constraint:
            _make_constraint(_lif_cpu_usage, self.ensemble.size_in, size_out),
        }

        # Partition the ensemble to create clusters of co-operating cores
        self.clusters = list()
        vertices = list()
        constraints = list()
        for sl in partition.partition(slice(0, self.ensemble.n_neurons),
                                      partition_constraints):
            # For each slice we create a cluster of co-operating cores.  We
            # instantiate the cluster and then ask it to produce vertices which
            # will be added to the netlist.
            cluster = EnsembleCluster(sl, self.ensemble.size_in, size_out,
                                      ens_regions)
            self.clusters.append(cluster)

            # Get the vertices for the cluster
            cluster_vertices = cluster.make_vertices(cycles)
            vertices.extend(cluster_vertices)

            # Create a constraint which forces these vertices to be present on
            # the same chip
            constraints.append(SameChipConstraint(cluster_vertices))

        # Return the vertices and callback methods
        return netlistspec(vertices,
                           self.load_to_machine,
                           after_simulation_function=self.after_simulation,
                           constraints=constraints)
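
The _make_constraint closure above is a small but important adapter: the
usage functions are written in terms of (size_in, size_out, n_neurons), while
the partitioner only ever passes a vertex slice. A stripped-down, runnable
demonstration of the same pattern (the toy usage function is hypothetical and
stands in for the real _lif_*_usage functions, which are not shown here):

def make_constraint(f, size_in, size_out, **kwargs):
    """Adapt f(size_in, size_out, n_neurons, ...) to take a vertex slice."""
    def f_(vertex_slice):
        n_neurons = vertex_slice.stop - vertex_slice.start
        return f(size_in, size_out, n_neurons, **kwargs)
    return f_

def toy_usage(size_in, size_out, n_neurons):
    # Hypothetical usage model: one word per neuron per dimension
    return n_neurons * (size_in + size_out)

g = make_constraint(toy_usage, size_in=16, size_out=8)
assert g(slice(0, 100)) == 100 * (16 + 8)  # 2400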
Example No. 11
    def make_vertices(self, cycles):
        """Partition the neurons onto multiple cores."""
        # Make reduced constraints to partition against. We don't partition
        # against SDRAM because we're already sure that there is sufficient
        # SDRAM (and if there isn't, we can't possibly fit all the vertices
        # on a single chip).
        dtcm_constraint = partition.Constraint(64 * 2**10, 0.9)  # 90% of DTCM
        cpu_constraint = partition.Constraint(cycles, 0.8)  # 80% of compute

        # Get the number of neurons in this cluster
        n_neurons = self.neuron_slice.stop - self.neuron_slice.start

        # Form the constraints dictionary
        def _make_constraint(f, size_in, **kwargs):
            """Wrap a usage computation method to work with the partitioner."""
            def f_(neuron_slice, output_slice):
                # Calculate the number of neurons
                n_neurons = neuron_slice.stop - neuron_slice.start

                # Calculate the number of outgoing dimensions
                size_out = output_slice.stop - output_slice.start

                # Call the original method
                return f(size_in, size_out, n_neurons, **kwargs)

            return f_

        constraints = {
            dtcm_constraint:
            _make_constraint(_lif_dtcm_usage,
                             self.size_in,
                             n_neurons_in_cluster=n_neurons),
            cpu_constraint:
            _make_constraint(_lif_cpu_usage,
                             self.size_in,
                             n_neurons_in_cluster=n_neurons),
        }

        # Partition the slice of neurons that we have
        self.neuron_slices = list()
        output_slices = list()
        for neurons, outputs in partition.partition_multiple(
            (self.neuron_slice, slice(self.size_out)), constraints):
            self.neuron_slices.append(neurons)
            output_slices.append(outputs)

        n_slices = len(self.neuron_slices)
        assert n_slices <= 16  # A cluster cannot use more than 16 cores

        # Also partition the input space
        input_slices = partition.divide_slice(slice(0, self.size_in), n_slices)

        # Zip these together to create the vertices
        all_slices = zip(input_slices, output_slices)
        for i, (in_slice, out_slice) in enumerate(all_slices):
            # Create the vertex
            vertex = EnsembleSlice(i, self.neuron_slices, in_slice, out_slice,
                                   self.regions)

            # Add to the list of vertices
            self.vertices.append(vertex)

        # Return all the vertices
        return self.vertices
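
partition.divide_slice, used above to spread the input space across the
cluster's cores, simply cuts a slice into n contiguous pieces. With the
ceiling-sized chunks of the sketch after Example No. 5 (the library's exact
boundaries may differ):

list(divide_slice(slice(0, 100), 3))
# -> [slice(0, 34), slice(34, 68), slice(68, 100)]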