Example #1
    def __get_neuron_resources(self, vertex_slice):
        """  Gets the resources of the neurons of a slice of atoms from a given
             app vertex.

        :param ~pacman.model.graphs.common.Slice vertex_slice: the slice
        :rtype: ~pacman.model.resources.ResourceContainer
        """
        n_record = len(self._governed_app_vertex.neuron_recordables)
        variable_sdram = self._governed_app_vertex.get_neuron_variable_sdram(
            vertex_slice)
        sdram = MultiRegionSDRAM()
        sdram.merge(self._governed_app_vertex.get_common_constant_sdram(
            n_record, NeuronProvenance.N_ITEMS + NeuronMainProvenance.N_ITEMS,
            PopulationNeuronsMachineVertex.COMMON_REGIONS))
        sdram.merge(self._governed_app_vertex.get_neuron_constant_sdram(
            vertex_slice, PopulationNeuronsMachineVertex.NEURON_REGIONS))
        sdram.add_cost(
            PopulationNeuronsMachineVertex.REGIONS.SDRAM_EDGE_PARAMS.value,
            NEURONS_SDRAM_PARAMS_SIZE)
        sdram.nest(
            len(PopulationNeuronsMachineVertex.REGIONS) + 1, variable_sdram)
        dtcm = self._governed_app_vertex.get_common_dtcm()
        dtcm += self._governed_app_vertex.get_neuron_dtcm(vertex_slice)
        cpu_cycles = self._governed_app_vertex.get_common_cpu()
        cpu_cycles += self._governed_app_vertex.get_neuron_cpu(vertex_slice)

        # set resources required from this object
        container = ResourceContainer(
            sdram=sdram, dtcm=DTCMResource(dtcm),
            cpu_cycles=CPUCyclesPerTickResource(cpu_cycles))

        # return the total resources.
        return container
    def __proj_dependent_synapse_sdram(self, incoming_projections):
        """ Get the SDRAM used by synapse cores dependent on the projections

        :param list(~spynnaker.pyNN.models.Projection) incoming_projections:
            The projections to consider in the calculations
        :rtype: ~pacman.model.resources.MultiRegionSDRAM
        """
        app_vertex = self._governed_app_vertex
        sdram = MultiRegionSDRAM()
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.pop_table,
            max(
                MasterPopTableAsBinarySearch.get_master_population_table_size(
                    incoming_projections), BYTES_PER_WORD))
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.
            connection_builder,
            max(app_vertex.get_synapse_expander_size(incoming_projections),
                BYTES_PER_WORD))
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.
            bitfield_filter,
            max(get_estimated_sdram_for_bit_field_region(incoming_projections),
                BYTES_PER_WORD))
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.
            bitfield_key_map,
            max(get_estimated_sdram_for_key_region(incoming_projections),
                BYTES_PER_WORD))
        return sdram
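
Both methods above build up a MultiRegionSDRAM breakdown region by region before anything is merged or totalled. Below is a minimal, self-contained sketch of that accumulation pattern; the region keys and byte counts are illustrative only, and the import path follows the ~pacman.model.resources references in the docstrings.

from pacman.model.resources import MultiRegionSDRAM

# One fixed-size region, one region that grows per timestep, and a nested
# sub-breakdown, mirroring how add_cost() and nest() are used above.
sdram = MultiRegionSDRAM()
sdram.add_cost("params", 128)            # 128 bytes, fixed
sdram.add_cost("recording", 64, 8)       # 64 bytes plus 8 bytes per timestep
sub = MultiRegionSDRAM()
sub.add_cost("pop_table", 256)
sdram.nest("synapse_regions", sub)

# Total for a 1000-timestep run: (128 + 64 + 256) fixed + 8 * 1000 variable
print(sdram.get_total_sdram(1000))       # -> 8448
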
Example #3
    def __bitfield_size(self):
        """ Work out how much SDRAM is needed by the bit fields

        :rtype: ~pacman.model.resources.MultiRegionSDRAM
        """
        if self.__bitfield_sz is None:
            sdram = MultiRegionSDRAM()
            projections = self._governed_app_vertex.incoming_projections
            sdram.add_cost(
                PopulationMachineVertex.SYNAPSE_REGIONS.bitfield_filter,
                get_estimated_sdram_for_bit_field_region(projections))
            sdram.add_cost(
                PopulationMachineVertex.SYNAPSE_REGIONS.bitfield_key_map,
                get_estimated_sdram_for_key_region(projections))
            sdram.add_cost(
                PopulationMachineVertex.SYNAPSE_REGIONS.bitfield_builder,
                exact_sdram_for_bit_field_builder_region())
            self.__bitfield_sz = sdram
        return self.__bitfield_sz
Example #4
    def __get_constant_sdram(self, vertex_slice):
        """ returns the constant sdram used by the vertex slice.

        :param ~pacman.model.graphs.common.Slice vertex_slice:
            the atoms to get the constant SDRAM of
        :rtype: ~pacman.model.resources.MultiRegionSDRAM
        """
        n_record = (
            len(self._governed_app_vertex.neuron_recordables) +
            len(self._governed_app_vertex.synapse_recordables))
        n_provenance = (
            NeuronProvenance.N_ITEMS + SynapseProvenance.N_ITEMS +
            MainProvenance.N_ITEMS + SpikeProcessingProvenance.N_ITEMS)
        sdram = MultiRegionSDRAM()
        sdram.merge(self._governed_app_vertex.get_common_constant_sdram(
            n_record, n_provenance, PopulationMachineVertex.COMMON_REGIONS))
        sdram.merge(self._governed_app_vertex.get_neuron_constant_sdram(
            vertex_slice, PopulationMachineVertex.NEURON_REGIONS))
        sdram.merge(self.__get_synapse_constant_sdram(vertex_slice))
        return sdram
    def __independent_synapse_sdram(self):
        """ Get the SDRAM used by all synapse cores independent of projections

        :rtype: ~pacman.model.resources.MultiRegionSDRAM
        """
        app_vertex = self._governed_app_vertex
        sdram = MultiRegionSDRAM()
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.synapse_params,
            max(app_vertex.get_synapse_params_size(), BYTES_PER_WORD))
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.
            bitfield_builder,
            max(exact_sdram_for_bit_field_builder_region(), BYTES_PER_WORD))
        return sdram
Example #6
    def get_resources_used_by_atoms(self, vertex_slice):
        """  Gets the resources of a slice of atoms

        :param ~pacman.model.graphs.common.Slice vertex_slice: the slice
        :rtype: ~pacman.model.resources.ResourceContainer
        """
        # pylint: disable=arguments-differ
        variable_sdram = self.__get_variable_sdram(vertex_slice)
        constant_sdram = self.__get_constant_sdram(vertex_slice)
        sdram = MultiRegionSDRAM()
        sdram.nest(len(PopulationMachineVertex.REGIONS) + 1, variable_sdram)
        sdram.merge(constant_sdram)

        # set resources required from this object
        container = ResourceContainer(
            sdram=sdram, dtcm=self.__get_dtcm_cost(vertex_slice),
            cpu_cycles=self.__get_cpu_cost(vertex_slice))

        # return the total resources.
        return container
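
get_resources_used_by_atoms wraps the SDRAM breakdown together with DTCM and CPU estimates in a ResourceContainer. A minimal sketch of building and querying such a container follows; the values are illustrative, and the import paths for DTCMResource and CPUCyclesPerTickResource are assumed to sit alongside the documented ~pacman.model.resources classes.

from pacman.model.resources import (
    CPUCyclesPerTickResource, DTCMResource, MultiRegionSDRAM,
    ResourceContainer)

sdram = MultiRegionSDRAM()
sdram.add_cost("neuron_params", 400)     # fixed cost in bytes
sdram.add_cost("recording", 16, 12)      # 16 bytes plus 12 bytes per timestep

container = ResourceContainer(
    sdram=sdram, dtcm=DTCMResource(2048),
    cpu_cycles=CPUCyclesPerTickResource(1000))

# The SDRAM side of the container can be totalled for a planned run length.
print(container.sdram.get_total_sdram(500))   # -> 400 + 16 + 12 * 500 = 6416
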
Example #7
    def __get_synapse_constant_sdram(self, vertex_slice):
        """ Get the amount of fixed SDRAM used by synapse parts

        :param ~pacman.model.graphs.common.Slice vertex_slice:
            The slice of neurons to get the size of

        :rtype: ~pacman.model.resources.MultiRegionSDRAM
        """
        sdram = MultiRegionSDRAM()
        app_vertex = self._governed_app_vertex
        sdram.add_cost(
            PopulationMachineVertex.SYNAPSE_REGIONS.synapse_params,
            app_vertex.get_synapse_params_size())
        sdram.add_cost(
            PopulationMachineVertex.SYNAPSE_REGIONS.synapse_dynamics,
            app_vertex.get_synapse_dynamics_size(vertex_slice))
        sdram.add_cost(
            PopulationMachineVertex.SYNAPSE_REGIONS.structural_dynamics,
            self.__structural_size(vertex_slice))
        sdram.add_cost(
            PopulationMachineVertex.SYNAPSE_REGIONS.synaptic_matrix,
            self.__all_syn_block_size(vertex_slice))
        sdram.add_cost(
            PopulationMachineVertex.SYNAPSE_REGIONS.direct_matrix,
            app_vertex.all_single_syn_size)
        sdram.add_cost(
            PopulationMachineVertex.SYNAPSE_REGIONS.pop_table,
            MasterPopTableAsBinarySearch.get_master_population_table_size(
                app_vertex.incoming_projections))
        sdram.add_cost(
            PopulationMachineVertex.SYNAPSE_REGIONS.connection_builder,
            self.__synapse_expander_size())
        sdram.merge(self.__bitfield_size())
        return sdram
    def __get_synapse_resources(self, vertex_slice, shared_sdram=None):
        """  Get the resources of the synapses of a slice of atoms from a
             given app vertex.

        :param ~pacman.model.graphs.common.Slice vertex_slice: the slice
        :param ~pacman.model.resources.MultiRegionSDRAM shared_sdram:
            The SDRAM shared between cores, if this is to be included
        :rtype: ~pacman.model.resources.ResourceContainer
        """
        n_record = len(self._governed_app_vertex.synapse_recordables)
        variable_sdram = self._governed_app_vertex.get_synapse_variable_sdram(
            vertex_slice)
        sdram = MultiRegionSDRAM()
        sdram.merge(
            self._governed_app_vertex.get_common_constant_sdram(
                n_record, SynapseProvenance.N_ITEMS +
                SpikeProcessingFastProvenance.N_ITEMS,
                PopulationSynapsesMachineVertexLead.COMMON_REGIONS))

        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.REGIONS.SDRAM_EDGE_PARAMS.
            value, SYNAPSES_SDRAM_PARAMS_SIZE)
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.REGIONS.KEY_REGION.value,
            KEY_CONFIG_SIZE)
        sdram.nest(
            len(PopulationSynapsesMachineVertexLead.REGIONS) + 1,
            variable_sdram)
        if shared_sdram is not None:
            sdram.merge(shared_sdram)
        dtcm = self._governed_app_vertex.get_common_dtcm()
        dtcm += self._governed_app_vertex.get_synapse_dtcm(vertex_slice)
        cpu_cycles = self._governed_app_vertex.get_common_cpu()
        cpu_cycles += self._governed_app_vertex.get_synapse_cpu(vertex_slice)

        # set resources required from this object
        container = ResourceContainer(
            sdram=sdram,
            dtcm=DTCMResource(dtcm),
            cpu_cycles=CPUCyclesPerTickResource(cpu_cycles))

        # return the total resources.
        return container
    def __shared_synapse_sdram(self, independent_synapse_sdram,
                               proj_dependent_sdram, all_syn_block_sz,
                               structural_sz, dynamics_sz):
        """ Get the SDRAM shared between synapse cores

        :rtype: ~pacman.model.resources.MultiRegionSDRAM
        """
        sdram = MultiRegionSDRAM()
        sdram.merge(independent_synapse_sdram)
        sdram.merge(proj_dependent_sdram)
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.
            synaptic_matrix, all_syn_block_sz)
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.direct_matrix,
            max(self._governed_app_vertex.all_single_syn_size, BYTES_PER_WORD))
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.
            structural_dynamics, structural_sz)
        sdram.add_cost(
            PopulationSynapsesMachineVertexLead.SYNAPSE_REGIONS.
            synapse_dynamics, dynamics_sz)
        return sdram
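
__shared_synapse_sdram relies on merge() to fold several breakdowns into one; as the unit test further below shows, merge keeps each region and sums costs that land on the same key. A small sketch of that behaviour, with keys and sizes that are illustrative only:

from pacman.model.resources import MultiRegionSDRAM

independent = MultiRegionSDRAM()
independent.add_cost("synapse_params", 64)
independent.add_cost("overheads", 20)

dependent = MultiRegionSDRAM()
dependent.add_cost("pop_table", 256)
dependent.add_cost("overheads", 22)

shared = MultiRegionSDRAM()
shared.merge(independent)
shared.merge(dependent)

# Costs with the same key are summed; everything here is fixed, so the
# timestep count does not matter.
print(shared.get_total_sdram(0))         # -> 64 + 20 + 256 + 22 = 362
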
    def create_machine_vertices(self, resource_tracker, machine_graph):
        app_vertex = self._governed_app_vertex
        label = app_vertex.label
        constraints = get_remaining_constraints(app_vertex)

        # Structural plasticity can only be run on a single synapse core
        if (isinstance(app_vertex.synapse_dynamics,
                       AbstractSynapseDynamicsStructural)
                and self.__n_synapse_vertices != 1):
            raise SynapticConfigurationException(
                "The current implementation of structural plasticity can only"
                " be run on a single synapse core.  Please ensure the number"
                " of synapse cores is set to 1")

        # Do some checks to make sure everything is likely to fit
        atoms_per_core = min(app_vertex.get_max_atoms_per_core(),
                             app_vertex.n_atoms)
        n_synapse_types = app_vertex.neuron_impl.get_n_synapse_types()
        if (get_n_bits(atoms_per_core) + get_n_bits(n_synapse_types) +
                get_n_bits(self.__get_max_delay)) > MAX_RING_BUFFER_BITS:
            raise SynapticConfigurationException(
                "The combination of the number of neurons per core ({}), "
                "the number of synapse types ({}), and the maximum delay per "
                "core ({}) will require too much DTCM.  Please reduce one or "
                "more of these values.".format(atoms_per_core, n_synapse_types,
                                               self.__get_max_delay))

        self.__neuron_vertices = list()
        self.__synapse_vertices = list()
        self.__synapse_verts_by_neuron = defaultdict(list)

        incoming_direct_poisson = self.__handle_poisson_sources(
            label, machine_graph)

        # Work out the ring buffer shifts based on all incoming things
        rb_shifts = app_vertex.get_ring_buffer_shifts(
            app_vertex.incoming_projections)
        weight_scales = app_vertex.get_weight_scales(rb_shifts)

        # Get resources for synapses
        independent_synapse_sdram = self.__independent_synapse_sdram()
        proj_dependent_sdram = self.__proj_dependent_synapse_sdram(
            app_vertex.incoming_projections)

        for index, vertex_slice in enumerate(self.__get_fixed_slices()):

            # Find the maximum number of cores on any chip available
            max_crs = resource_tracker.get_maximum_cores_available_on_a_chip()
            if max_crs < (self.__n_synapse_vertices + 1):
                raise ConfigurationException(
                    "No chips remaining with enough cores for"
                    f" {self.__n_synapse_vertices} synapse cores and a neuron"
                    " core")
            max_crs -= self.__n_synapse_vertices + 1

            # Create the neuron vertex for the slice
            neuron_vertex, neuron_resources = self.__add_neuron_core(
                vertex_slice, label, index, rb_shifts, weight_scales,
                machine_graph, constraints)

            # Keep track of synapse vertices for each neuron vertex and
            # resources used by each core (neuron core is added later)
            synapse_vertices = list()
            self.__synapse_verts_by_neuron[neuron_vertex] = synapse_vertices
            all_resources = []

            # Add the first vertex
            synapse_references, syn_label = self.__add_lead_synapse_core(
                vertex_slice, independent_synapse_sdram, proj_dependent_sdram,
                label, rb_shifts, weight_scales, all_resources, machine_graph,
                synapse_vertices, neuron_vertex, constraints)

            # Do the remaining synapse cores
            for i in range(1, self.__n_synapse_vertices):
                self.__add_shared_synapse_core(syn_label, i, vertex_slice,
                                               synapse_references,
                                               all_resources, machine_graph,
                                               synapse_vertices, neuron_vertex,
                                               constraints)

            # Add resources for Poisson vertices up to core limit
            poisson_vertices = incoming_direct_poisson[vertex_slice]
            remaining_poisson_vertices = list()
            added_poisson_vertices = list()
            for poisson_vertex, poisson_edge in poisson_vertices:
                if max_crs <= 0:
                    remaining_poisson_vertices.append(poisson_vertex)
                    self.__add_poisson_multicast(poisson_vertex,
                                                 synapse_vertices,
                                                 machine_graph, poisson_edge)
                else:
                    all_resources.append(
                        (poisson_vertex.resources_required, []))
                    added_poisson_vertices.append(poisson_vertex)
                    max_crs -= 1

            if remaining_poisson_vertices:
                logger.warning(
                    f"Vertex {label} is using multicast for"
                    f" {len(remaining_poisson_vertices)} one-to-one Poisson"
                    " sources as not enough cores exist to put them on the"
                    " same chip")

            # Create an SDRAM edge partition
            sdram_label = "SDRAM {} Synapses-->Neurons:{}-{}".format(
                label, vertex_slice.lo_atom, vertex_slice.hi_atom)
            source_vertices = added_poisson_vertices + synapse_vertices
            sdram_partition = SourceSegmentedSDRAMMachinePartition(
                SYNAPSE_SDRAM_PARTITION_ID, sdram_label, source_vertices)
            machine_graph.add_outgoing_edge_partition(sdram_partition)
            neuron_vertex.set_sdram_partition(sdram_partition)

            # Add SDRAM edges for synapse vertices
            for source_vertex in source_vertices:
                edge_label = "SDRAM {}-->{}".format(source_vertex.label,
                                                    neuron_vertex.label)
                machine_graph.add_edge(
                    SDRAMMachineEdge(source_vertex, neuron_vertex, edge_label),
                    SYNAPSE_SDRAM_PARTITION_ID)
                source_vertex.set_sdram_partition(sdram_partition)

            # Add SDRAM edge requirements to the neuron SDRAM, as the resource
            # tracker will otherwise try to add another core for it
            extra_sdram = MultiRegionSDRAM()
            extra_sdram.merge(neuron_resources.sdram)
            extra_sdram.add_cost(
                len(extra_sdram.regions) + 1,
                sdram_partition.total_sdram_requirements())
            neuron_resources_plus = ResourceContainer(
                sdram=extra_sdram,
                dtcm=neuron_resources.dtcm,
                cpu_cycles=neuron_resources.cpu_cycles,
                iptags=neuron_resources.iptags,
                reverse_iptags=neuron_resources.reverse_iptags)
            all_resources.append((neuron_resources_plus, constraints))

            # Allocate all the resources to ensure they all fit
            resource_tracker.allocate_constrained_group_resources(
                all_resources)

        return True
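
The ring-buffer check near the top of create_machine_vertices adds up the index bits needed for neurons, synapse types and delays on one core. The following is a rough illustration of that arithmetic; the helper below is a hypothetical stand-in for get_n_bits, assumed to be roughly ceil(log2(n)), and MAX_RING_BUFFER_BITS is an assumed value used for illustration only.

import math

def n_bits(n):
    # Hypothetical stand-in: bits needed to index n items (assumption)
    return 0 if n == 0 else max(1, math.ceil(math.log2(n)))

MAX_RING_BUFFER_BITS = 14                # assumed limit, for illustration

atoms_per_core = 256                     # neurons per core
n_synapse_types = 2                      # e.g. excitatory and inhibitory
max_delay = 16                           # delay slots handled per core

total = n_bits(atoms_per_core) + n_bits(n_synapse_types) + n_bits(max_delay)
print(total, "index bits;",
      "fits" if total <= MAX_RING_BUFFER_BITS else "too much DTCM")
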
Example #11
    def test_sdram(self):
        """
        test that adding a SDRAM resource to a resource container works
        correctly
        """
        const1 = ConstantSDRAM(128)
        self.assertEqual(const1.get_total_sdram(None), 128)
        const2 = ConstantSDRAM(256)
        combo = const1 + const2
        self.assertEqual(combo.get_total_sdram(None), 128 + 256)
        combo = const1 - const2
        self.assertEqual(combo.get_total_sdram(None), 128 - 256)
        combo = const2 + const1
        self.assertEqual(combo.get_total_sdram(None), 256 + 128)
        combo = const2 - const1
        self.assertEqual(combo.get_total_sdram(None), 256 - 128)

        var1 = VariableSDRAM(124, 8)
        self.assertEqual(var1.get_total_sdram(100), 124 + 8 * 100)
        combo = var1 + const1
        self.assertEqual(combo.get_total_sdram(100), 124 + 8 * 100 + 128)
        combo = var1 - const1
        self.assertEqual(combo.get_total_sdram(100), 124 + 8 * 100 - 128)
        combo = const1 + var1
        self.assertEqual(combo.get_total_sdram(100), 128 + 124 + 8 * 100)
        combo = const1 - var1
        self.assertEqual(combo.get_total_sdram(100), 128 - (124 + 8 * 100))
        var2 = VariableSDRAM(234, 6)
        combo = var2 + var1
        self.assertEqual(combo.get_total_sdram(150), 234 + 124 + (6 + 8) * 150)
        combo = var2 - var1
        self.assertEqual(combo.get_total_sdram(150), 234 - 124 + (6 - 8) * 150)

        multi1 = MultiRegionSDRAM()
        multi1.add_cost(1, 100, 4)
        multi1.add_cost(2, 50, 3)
        multi1.add_cost("overheads", 20)
        multi2 = MultiRegionSDRAM()
        multi2.add_cost(MockEnum.ZERO, 88)
        multi2.add_cost(MockEnum.ONE, 72)
        multi2.add_cost("overheads", 22)

        combo = multi1 + multi2
        self.assertEqual(combo.get_total_sdram(150),
                         100 + 50 + 20 + 88 + 72 + 22 + (4 + 3) * 150)

        multi3 = MultiRegionSDRAM()
        multi3.nest("foo", multi1)
        multi3.nest("bar", multi2)

        multi1.merge(multi2)
        self.assertEqual(len(multi1.regions), 5)
        self.assertEqual(multi1.regions["overheads"], ConstantSDRAM(20 + 22))
        self.assertEqual(multi1.get_total_sdram(150),
                         100 + 50 + 20 + 88 + 72 + 22 + (4 + 3) * 150)
        self.assertEqual(multi1, combo)
        self.assertEqual(multi1, multi3)
        with tempfile.TemporaryFile(mode="w") as target:
            multi3.report(1000, target=target)