def _partition_one_application_vertex(
            self, vertex, res_tracker, m_graph, mapper, plan_n_timesteps):
        """ Partitions a single application vertex.
        """
        # Compute how many atoms of this vertex we can put on one core
        atoms_per_core = self._compute_atoms_per_core(
            vertex, res_tracker, plan_n_timesteps)
        if atoms_per_core < 1.0:
            raise PacmanPartitionException(
                "Not enough resources available to create vertex")

        # Partition into vertices
        for first in range(0, vertex.n_atoms, int(atoms_per_core)):
            # Determine vertex size
            last = int(min(first + atoms_per_core, vertex.n_atoms) - 1)
            if first < 0 or last < 0:
                raise PacmanPartitionException(
                    "Not enough resources available to create vertex")

            # Create and store new vertex, and increment elements first
            vertex_slice = Slice(first, last)
            resources = vertex.get_resources_used_by_atoms(vertex_slice)

            m_vertex = vertex.create_machine_vertex(
                vertex_slice, resources,
                "{}:{}:{}".format(vertex.label, first, last),
                get_remaining_constraints(vertex))
            m_graph.add_vertex(m_vertex)
            mapper.add_vertex_mapping(m_vertex, vertex_slice, vertex)

            # update allocated resources
            res_tracker.allocate_constrained_resources(
                resources, vertex.constraints)
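
# A minimal standalone sketch of the slice arithmetic above, with made-up
# values for n_atoms and atoms_per_core (neither is taken from a real
# vertex), showing the inclusive (first, last) pairs the loop produces
# when atoms_per_core comes back as a float:
n_atoms = 10          # hypothetical application vertex size
atoms_per_core = 4.0  # hypothetical result of _compute_atoms_per_core

slices = []
for first in range(0, n_atoms, int(atoms_per_core)):
    # Same arithmetic as the partitioner: clamp to the vertex size and
    # convert to an inclusive last-atom index.
    last = int(min(first + atoms_per_core, n_atoms) - 1)
    slices.append((first, last))

print(slices)  # [(0, 3), (4, 7), (8, 9)]
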
    def _partition_one_application_vertex(self, vertex, res_tracker, m_graph,
                                          mapper):
        """ Partitions a single application vertex.
        """
        # Compute how many atoms of this vertex we can put on one core
        atoms_per_core = self._compute_atoms_per_core(vertex, res_tracker)
        if atoms_per_core < 1.0:
            raise PacmanPartitionException(
                "Not enough resources available to create vertex")

        # Partition into vertices
        for first in range(0, vertex.n_atoms, int(atoms_per_core)):
            # Determine vertex size
            last = int(min(first + atoms_per_core, vertex.n_atoms) - 1)
            if first < 0 or last < 0:
                raise PacmanPartitionException(
                    "Not enough resources available to create vertex")

            # Create and store new vertex, and increment elements first
            vertex_slice = Slice(first, last)
            resources = vertex.get_resources_used_by_atoms(vertex_slice)

            m_vertex = vertex.create_machine_vertex(
                vertex_slice, resources,
                "{}:{}:{}".format(vertex.label, first, last),
                utils.get_remaining_constraints(vertex))
            m_graph.add_vertex(m_vertex)
            mapper.add_vertex_mapping(m_vertex, vertex_slice, vertex)

            # update allocated resources
            res_tracker.allocate_constrained_resources(resources,
                                                       vertex.constraints)
    def _partition_by_atoms(
            self, vertices, n_atoms, max_atoms_per_core, subgraph, graph,
            graph_to_subgraph_mapper, resource_tracker):
        """ Try to partition subvertices on how many atoms it can fit on\
            each subvert

        :param vertices: the vertices that need to be partitioned at the\
                    same time
        :type vertices: iterable of\
                    :py:class:`pacman.model.partitionable_graph.abstract_partitionable_vertex.AbstractPartitionableVertex`
        :param n_atoms: the number of atoms in the first vertex
        :type n_atoms: int
        :param max_atoms_per_core: the max atoms from all the vertices\
                    considered that have max_atom constraints
        :type max_atoms_per_core: int
        :param subgraph: the partitioned_graph of the problem space to put\
                    subverts in
        :type subgraph: :py:class:`pacman.model.subgraph.subgraph.Subgraph`
        :param graph: the partitionable_graph object
        :type graph:\
                    :py:class:`pacman.model.partitionable_graph.partitionable_graph.PartitionableGraph`
        :param graph_to_subgraph_mapper: the mapper from\
                    partitionable_graph to partitioned_graph
        :type graph_to_subgraph_mapper:\
                    :py:class:`pacman.model.graph_subgraph_mapper.graph_mapper.GraphMapper`
        :param resource_tracker: A tracker of assigned resources
        :type resource_tracker:\
                    :py:class:`pacman.utilities.resource_tracker.ResourceTracker`
        """
        n_atoms_placed = 0
        while n_atoms_placed < n_atoms:

            lo_atom = n_atoms_placed
            hi_atom = lo_atom + max_atoms_per_core - 1
            if hi_atom >= n_atoms:
                hi_atom = n_atoms - 1

            # Scale down the number of atoms to fit the available resources
            used_placements, hi_atom = self._scale_down_resources(
                lo_atom, hi_atom, vertices, resource_tracker,
                max_atoms_per_core, graph)

            # Update where we are
            n_atoms_placed = hi_atom + 1

            # Create the subvertices
            for (vertex, used_resources) in used_placements:
                vertex_slice = Slice(lo_atom, hi_atom)
                subvertex = vertex.create_subvertex(
                    vertex_slice, used_resources,
                    "{}:{}:{}".format(vertex.label, lo_atom, hi_atom),
                    partition_algorithm_utilities.get_remaining_constraints(
                        vertex))

                # update objects
                subgraph.add_subvertex(subvertex)
                graph_to_subgraph_mapper.add_subvertex(
                    subvertex, vertex_slice, vertex)
    def _partition_by_atoms(self, vertices, n_atoms, max_atoms_per_core,
                            subgraph, graph, graph_to_subgraph_mapper,
                            resource_tracker):
        """ Try to partition subvertices on how many atoms it can fit on\
            each subvert

        :param vertices: the vertices that need to be partitioned at the\
                    same time
        :type vertices: iterable of\
                    :py:class:`pacman.model.partitionable_graph.abstract_partitionable_vertex.AbstractPartitionableVertex`
        :param n_atoms: the number of atoms in the first vertex
        :type n_atoms: int
        :param max_atoms_per_core: the max atoms from all the vertices\
                    considered that have max_atom constraints
        :type max_atoms_per_core: int
        :param subgraph: the partitioned_graph of the problem space to put\
                    subverts in
        :type subgraph: :py:class:`pacman.model.subgraph.subgraph.Subgraph`
        :param graph: the partitionable_graph object
        :type graph:\
                    :py:class:`pacman.model.partitionable_graph.partitionable_graph.PartitionableGraph`
        :param graph_to_subgraph_mapper: the mapper from\
                    partitionable_graph to partitioned_graph
        :type graph_to_subgraph_mapper:\
                    :py:class:`pacman.model.graph_subgraph_mapper.graph_mapper.GraphMapper`
        :param resource_tracker: A tracker of assigned resources
        :type resource_tracker:\
                    :py:class:`pacman.utilities.resource_tracker.ResourceTracker`
        """
        n_atoms_placed = 0
        while n_atoms_placed < n_atoms:

            lo_atom = n_atoms_placed
            hi_atom = lo_atom + max_atoms_per_core - 1
            if hi_atom >= n_atoms:
                hi_atom = n_atoms - 1

            # Scale down the number of atoms to fit the available resources
            used_placements, hi_atom = self._scale_down_resources(
                lo_atom, hi_atom, vertices, resource_tracker,
                max_atoms_per_core, graph)

            # Update where we are
            n_atoms_placed = hi_atom + 1

            # Create the subvertices
            for (vertex, used_resources) in used_placements:
                vertex_slice = Slice(lo_atom, hi_atom)
                subvertex = vertex.create_subvertex(
                    vertex_slice, used_resources,
                    "{}:{}:{}".format(vertex.label, lo_atom, hi_atom),
                    partition_algorithm_utilities.get_remaining_constraints(
                        vertex))

                # update objects
                subgraph.add_subvertex(subvertex)
                graph_to_subgraph_mapper.add_subvertex(
                    subvertex, vertex_slice, vertex)
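
# A self-contained sketch of the while-loop pattern used by
# _partition_by_atoms above. _scale_down_resources is replaced here by a
# hypothetical stub that halves each requested slice, purely to show how
# hi_atom can shrink and how n_atoms_placed then advances:
def _stub_scale_down(lo_atom, hi_atom):
    # Pretend the available resources only fit half of the request.
    return lo_atom + max(0, (hi_atom - lo_atom) // 2)

n_atoms = 20            # hypothetical total atom count
max_atoms_per_core = 8  # hypothetical per-core ceiling

n_atoms_placed = 0
while n_atoms_placed < n_atoms:
    lo_atom = n_atoms_placed
    hi_atom = min(lo_atom + max_atoms_per_core - 1, n_atoms - 1)
    hi_atom = _stub_scale_down(lo_atom, hi_atom)
    n_atoms_placed = hi_atom + 1
    print((lo_atom, hi_atom))  # (0, 3), (4, 7), (8, 11), ...
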
    def create_machine_vertices(self, resource_tracker, machine_graph):
        slices_resources_map = self.__split(resource_tracker)
        for vertex_slice in slices_resources_map:
            machine_vertex = self.create_machine_vertex(
                vertex_slice, slices_resources_map[vertex_slice],
                self.MACHINE_LABEL.format(self._governed_app_vertex.label,
                                          vertex_slice.lo_atom,
                                          vertex_slice.hi_atom),
                get_remaining_constraints(self._governed_app_vertex))
            machine_graph.add_vertex(machine_vertex)
        self._called = True
        return True
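
# The splitter above relies on __split returning a mapping from each
# slice to the resources that slice needs. A hypothetical sketch of that
# shape, with plain tuples standing in for PACMAN's Slice and resource
# container classes and made-up SDRAM/DTCM figures:
slices_resources_map = {
    (0, 99): {"sdram": 4096, "dtcm": 512},
    (100, 199): {"sdram": 4096, "dtcm": 512},
}
for (lo_atom, hi_atom), resources in slices_resources_map.items():
    # One machine vertex per fixed slice, labelled like the code above.
    label = "app_vertex:{}:{}".format(lo_atom, hi_atom)
    print(label, resources)
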
    def create_machine_vertices(self, resource_tracker, machine_graph,
                                app_graph):
        # pylint: disable=arguments-differ
        pre_slices, is_exact = self._other_splitter.get_out_going_slices()

        # check for exacts.
        if not is_exact:
            raise SpynnakerSplitterConfigurationException(
                self.NEED_EXACT_ERROR_MESSAGE)

        # create vertices correctly
        for index, vertex_slice in enumerate(pre_slices):
            vertex = self.create_machine_vertex(
                vertex_slice, index, resource_tracker,
                self.DELAY_EXTENSION_SLICE_LABEL.format(
                    self._other_splitter.governed_app_vertex, vertex_slice),
                get_remaining_constraints(self._governed_app_vertex),
                app_graph)
            machine_graph.add_vertex(vertex)
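
# The delay-extension splitter above must mirror, one-to-one, the exact
# slices produced by the splitter it shadows, so delayed spikes map back
# to the same atom ranges. A minimal sketch of that mirroring pattern,
# with hypothetical tuples standing in for real slices and vertices:
pre_slices, is_exact = [(0, 63), (64, 127)], True  # made-up exact slices
if not is_exact:
    raise ValueError("delay extensions need exact pre-vertex slices")
delay_vertices = [
    ("delay_extension", index, vertex_slice)
    for index, vertex_slice in enumerate(pre_slices)]
print(delay_vertices)
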
    def create_machine_vertices(self, resource_tracker, machine_graph):
        app_vertex = self._governed_app_vertex
        label = app_vertex.label
        constraints = get_remaining_constraints(app_vertex)

        # Structural plasticity can only be run on a single synapse core
        if (isinstance(app_vertex.synapse_dynamics,
                       AbstractSynapseDynamicsStructural)
                and self.__n_synapse_vertices != 1):
            raise SynapticConfigurationException(
                "The current implementation of structural plasticity can only"
                " be run on a single synapse core.  Please ensure the number"
                " of synapse cores is set to 1")

        # Do some checks to make sure everything is likely to fit
        atoms_per_core = min(app_vertex.get_max_atoms_per_core(),
                             app_vertex.n_atoms)
        n_synapse_types = app_vertex.neuron_impl.get_n_synapse_types()
        if (get_n_bits(atoms_per_core) + get_n_bits(n_synapse_types) +
                get_n_bits(self.__get_max_delay)) > MAX_RING_BUFFER_BITS:
            raise SynapticConfigurationException(
                "The combination of the number of neurons per core ({}), "
                "the number of synapse types ({}), and the maximum delay per "
                "core ({}) will require too much DTCM.  Please reduce one or "
                "more of these values.".format(atoms_per_core, n_synapse_types,
                                               self.__get_max_delay))

        self.__neuron_vertices = list()
        self.__synapse_vertices = list()
        self.__synapse_verts_by_neuron = defaultdict(list)

        incoming_direct_poisson = self.__handle_poisson_sources(
            label, machine_graph)

        # Work out the ring buffer shifts based on all incoming things
        rb_shifts = app_vertex.get_ring_buffer_shifts(
            app_vertex.incoming_projections)
        weight_scales = app_vertex.get_weight_scales(rb_shifts)

        # Get resources for synapses
        independent_synapse_sdram = self.__independent_synapse_sdram()
        proj_dependent_sdram = self.__proj_dependent_synapse_sdram(
            app_vertex.incoming_projections)

        for index, vertex_slice in enumerate(self.__get_fixed_slices()):

            # Find the maximum number of cores on any chip available
            max_crs = resource_tracker.get_maximum_cores_available_on_a_chip()
            if max_crs < (self.__n_synapse_vertices + 1):
                raise ConfigurationException(
                    "No chips remaining with enough cores for"
                    f" {self.__n_synapse_vertices} synapse cores and a neuron"
                    " core")
            max_crs -= self.__n_synapse_vertices + 1

            # Create the neuron vertex for the slice
            neuron_vertex, neuron_resources = self.__add_neuron_core(
                vertex_slice, label, index, rb_shifts, weight_scales,
                machine_graph, constraints)

            # Keep track of synapse vertices for each neuron vertex and
            # resources used by each core (neuron core is added later)
            synapse_vertices = list()
            self.__synapse_verts_by_neuron[neuron_vertex] = synapse_vertices
            all_resources = []

            # Add the first vertex
            synapse_references, syn_label = self.__add_lead_synapse_core(
                vertex_slice, independent_synapse_sdram, proj_dependent_sdram,
                label, rb_shifts, weight_scales, all_resources, machine_graph,
                synapse_vertices, neuron_vertex, constraints)

            # Do the remaining synapse cores
            for i in range(1, self.__n_synapse_vertices):
                self.__add_shared_synapse_core(syn_label, i, vertex_slice,
                                               synapse_references,
                                               all_resources, machine_graph,
                                               synapse_vertices, neuron_vertex,
                                               constraints)

            # Add resources for Poisson vertices up to core limit
            poisson_vertices = incoming_direct_poisson[vertex_slice]
            remaining_poisson_vertices = list()
            added_poisson_vertices = list()
            for poisson_vertex, poisson_edge in poisson_vertices:
                if max_crs <= 0:
                    remaining_poisson_vertices.append(poisson_vertex)
                    self.__add_poisson_multicast(poisson_vertex,
                                                 synapse_vertices,
                                                 machine_graph, poisson_edge)
                else:
                    all_resources.append(
                        (poisson_vertex.resources_required, []))
                    added_poisson_vertices.append(poisson_vertex)
                    max_crs -= 1

            if remaining_poisson_vertices:
                logger.warning(
                    f"Vertex {label} is using multicast for"
                    f" {len(remaining_poisson_vertices)} one-to-one Poisson"
                    " sources as not enough cores exist to put them on the"
                    " same chip")

            # Create an SDRAM edge partition
            sdram_label = "SDRAM {} Synapses-->Neurons:{}-{}".format(
                label, vertex_slice.lo_atom, vertex_slice.hi_atom)
            source_vertices = added_poisson_vertices + synapse_vertices
            sdram_partition = SourceSegmentedSDRAMMachinePartition(
                SYNAPSE_SDRAM_PARTITION_ID, sdram_label, source_vertices)
            machine_graph.add_outgoing_edge_partition(sdram_partition)
            neuron_vertex.set_sdram_partition(sdram_partition)

            # Add SDRAM edges for synapse vertices
            for source_vertex in source_vertices:
                edge_label = "SDRAM {}-->{}".format(source_vertex.label,
                                                    neuron_vertex.label)
                machine_graph.add_edge(
                    SDRAMMachineEdge(source_vertex, neuron_vertex, edge_label),
                    SYNAPSE_SDRAM_PARTITION_ID)
                source_vertex.set_sdram_partition(sdram_partition)

            # Add SDRAM edge requirements to the neuron SDRAM, as the resource
            # tracker will otherwise try to add another core for it
            extra_sdram = MultiRegionSDRAM()
            extra_sdram.merge(neuron_resources.sdram)
            extra_sdram.add_cost(
                len(extra_sdram.regions) + 1,
                sdram_partition.total_sdram_requirements())
            neuron_resources_plus = ResourceContainer(
                sdram=extra_sdram,
                dtcm=neuron_resources.dtcm,
                cpu_cycles=neuron_resources.cpu_cycles,
                iptags=neuron_resources.iptags,
                reverse_iptags=neuron_resources.reverse_iptags)
            all_resources.append((neuron_resources_plus, constraints))

            # Allocate all the resources to ensure they all fit
            resource_tracker.allocate_constrained_group_resources(
                all_resources)

        return True
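
# A worked sketch of the ring-buffer bit check performed near the top of
# the method above: the neuron index, synapse type and delay bits must
# fit together. get_n_bits is approximated with int.bit_length and every
# number below, including the limit, is made up for illustration:
def n_bits(n_values):
    # Bits needed to index n_values items (0 needs none).
    return 0 if n_values == 0 else (n_values - 1).bit_length()

MAX_RING_BUFFER_BITS = 14  # hypothetical limit, not the real constant
atoms_per_core = 256       # made-up neurons per core
n_synapse_types = 2        # e.g. excitatory and inhibitory
max_delay = 16             # made-up maximum delay per core

total = n_bits(atoms_per_core) + n_bits(n_synapse_types) + n_bits(max_delay)
print(total, total <= MAX_RING_BUFFER_BITS)  # 13 True
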
    def _partition_by_atoms(
            self, vertices, n_atoms, max_atoms_per_core, machine_graph,
            graph_mapper, resource_tracker, progress, fixed_n_atoms=False):
        """ Try to partition vertices on how many atoms it can fit on\
            each vertex

        :param vertices:\
            the vertices that need to be partitioned at the same time
        :type vertices:\
            iterable(:py:class:`pacman.model.graphs.application.ApplicationVertex`)
        :param n_atoms: the number of atoms in the first vertex
        :type n_atoms: int
        :param max_atoms_per_core:\
            the max atoms from all the vertices considered that have\
            max_atom constraints
        :type max_atoms_per_core: int
        :param machine_graph: the machine graph
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param graph_mapper: the mapper between graphs
        :type graph_mapper:\
            :py:class:`pacman.model.graphs.common.GraphMapper`
        :param resource_tracker: A tracker of assigned resources
        :type resource_tracker:\
            :py:class:`pacman.utilities.ResourceTracker`
        :param progress: The progress bar
        :param fixed_n_atoms:\
            True if max_atoms_per_core is actually the fixed number of atoms\
            per core and cannot be reduced
        :type fixed_n_atoms: bool
        """
        n_atoms_placed = 0
        while n_atoms_placed < n_atoms:
            lo_atom = n_atoms_placed
            hi_atom = lo_atom + max_atoms_per_core - 1
            if hi_atom >= n_atoms:
                hi_atom = n_atoms - 1

            # Scale down the number of atoms to fit the available resources
            used_placements, hi_atom = self._scale_down_resources(
                lo_atom, hi_atom, vertices, resource_tracker,
                max_atoms_per_core, fixed_n_atoms)

            # Update where we are
            n_atoms_placed = hi_atom + 1

            # Create the vertices
            for (vertex, used_resources) in used_placements:
                vertex_slice = Slice(lo_atom, hi_atom)
                machine_vertex = vertex.create_machine_vertex(
                    vertex_slice, used_resources,
                    label="{}:{}:{}".format(vertex.label, lo_atom, hi_atom),
                    constraints=partition_utils.get_remaining_constraints(
                        vertex))

                # update objects
                machine_graph.add_vertex(machine_vertex)
                graph_mapper.add_vertex_mapping(
                    machine_vertex, vertex_slice, vertex)

                progress.update(vertex_slice.n_atoms)
    def _partition_by_atoms(
            self, vertices, plan_n_timesteps, n_atoms, max_atoms_per_core,
            machine_graph, graph_mapper, resource_tracker, progress,
            fixed_n_atoms=False):
        """ Try to partition vertices on how many atoms it can fit on\
            each vertex

        :param vertices:\
            the vertices that need to be partitioned at the same time
        :type vertices:\
            iterable(:py:class:`pacman.model.graphs.application.ApplicationVertex`)
        :param plan_n_timesteps: number of timesteps to plan for
        :type plan_n_timesteps: int
        :param n_atoms: the number of atoms in the first vertex
        :type n_atoms: int
        :param max_atoms_per_core:\
            the max atoms from all the vertices considered that have\
            max_atom constraints
        :type max_atoms_per_core: int
        :param machine_graph: the machine graph
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param graph_mapper: the mapper between graphs
        :type graph_mapper:\
            :py:class:`pacman.model.graphs.common.GraphMapper`
        :param resource_tracker: A tracker of assigned resources
        :type resource_tracker:\
            :py:class:`pacman.utilities.ResourceTracker`
        :param progress: The progress bar
        :param fixed_n_atoms:\
            True if max_atoms_per_core is actually the fixed number of atoms\
            per core and cannot be reduced
        :type fixed_n_atoms: bool
        """
        n_atoms_placed = 0
        while n_atoms_placed < n_atoms:
            lo_atom = n_atoms_placed
            hi_atom = lo_atom + max_atoms_per_core - 1
            if hi_atom >= n_atoms:
                hi_atom = n_atoms - 1

            # Scale down the number of atoms to fit the available resources
            used_placements, hi_atom = self._scale_down_resources(
                lo_atom, hi_atom, vertices, plan_n_timesteps, resource_tracker,
                max_atoms_per_core, fixed_n_atoms)

            # Update where we are
            n_atoms_placed = hi_atom + 1

            # Create the vertices
            for (vertex, used_resources) in used_placements:
                vertex_slice = Slice(lo_atom, hi_atom)
                machine_vertex = vertex.create_machine_vertex(
                    vertex_slice, used_resources,
                    label="{}:{}:{}".format(vertex.label, lo_atom, hi_atom),
                    constraints=get_remaining_constraints(vertex))

                # update objects
                machine_graph.add_vertex(machine_vertex)
                graph_mapper.add_vertex_mapping(
                    machine_vertex, vertex_slice, vertex)

                progress.update(vertex_slice.n_atoms)
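
# A small sketch of the fixed_n_atoms contract described in the
# docstring above: when the per-core atom count is fixed, a slice that
# does not fit must fail rather than shrink. Names are illustrative only.
def fit_slice(requested, fitting, fixed_n_atoms):
    if fixed_n_atoms and fitting < requested:
        raise ValueError("cannot fit the fixed number of atoms per core")
    return min(requested, fitting)

print(fit_slice(8, 8, True))   # 8: fits exactly
print(fit_slice(8, 5, False))  # 5: scaled down when not fixed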