def __call__(self, partitioned_graph, machine):
    """ Measure how many chips a partitioned graph needs.

    :param partitioned_graph: The partitioned_graph to measure
    :type partitioned_graph:\
        :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
    :param machine: the machine on which resource usage is tracked
    :return: The size of the graph in number of chips
    :rtype: int
    """
    # Refuse to run if any subvertex carries a placer constraint this
    # algorithm does not understand
    utility_calls.check_algorithm_can_support_constraints(
        constrained_vertices=partitioned_graph.subvertices,
        supported_constraints=[PlacerChipAndCoreConstraint],
        abstract_constraint_type=AbstractPlacerConstraint)

    sorted_subverts = utility_calls.sort_objects_by_constraint_authority(
        partitioned_graph.subvertices)

    progress = ProgressBar(len(sorted_subverts),
                           "Measuring the partitioned graph")

    # Simulate allocation of every subvertex; the tracker records which
    # chips end up being used
    tracker = ResourceTracker(machine)
    for subvert in sorted_subverts:
        tracker.allocate_constrained_resources(
            subvert.resources_required, subvert.constraints)
        progress.update()
    progress.end()

    return {'n_chips': len(tracker.keys)}
def _do_allocation(self, ordered_subverts, placements, machine):
    """ Allocate cores and create placements for the given subvertices.

    Each inner list of ``ordered_subverts`` is intended to share one
    chip; a list is block-allocated when a chip has enough free cores,
    and its members are placed individually otherwise.

    :param ordered_subverts: lists of subvertices; each inner list\
        should be placed on a single chip where possible
    :param placements: the Placements object placements are added to
    :param machine: the machine to place onto
    :rtype: None
    """
    # Iterate over subvertices and generate placements
    progress_bar = ProgressBar(len(ordered_subverts),
                               "Placing graph vertices")
    resource_tracker = ResourceTracker(
        machine, self._generate_radial_chips(machine))

    # iterate over subverts
    for subvertex_list in ordered_subverts:

        # if too many one to ones to fit on a chip, allocate individually
        if len(subvertex_list) > self.MAX_CORES_PER_CHIP_TO_CONSIDER:
            for subvertex in subvertex_list:
                self._allocate_individual(subvertex, placements,
                                          progress_bar, resource_tracker)
        else:
            # merge the constraints of the whole group
            placement_constraint, ip_tag_constraints, \
                reverse_ip_tag_constraints = \
                self._merge_constraints(subvertex_list)

            # locate the most free cores available on any single chip
            # that satisfies the merged constraints
            max_size_on_a_chip = resource_tracker.\
                max_available_cores_on_chips_that_satisfy(
                    placement_constraint, ip_tag_constraints,
                    reverse_ip_tag_constraints)

            # BUG FIX: the branches were previously inverted - the group
            # was block-allocated exactly when it did NOT fit on a chip.
            # Block allocation is only valid when enough cores are free;
            # otherwise fall back to individual placement.
            if max_size_on_a_chip >= len(subvertex_list):

                # the whole group fits on one chip - allocate as a block
                resources = [subvert.resources_required
                             for subvert in subvertex_list]

                # get cores
                cores = resource_tracker.allocate_group(
                    resources, placement_constraint, ip_tag_constraints,
                    reverse_ip_tag_constraints)

                # allocate cores to subverts
                for subvert, (x, y, p, _, _) in zip(subvertex_list, cores):
                    placements.add_placement(Placement(subvert, x, y, p))
                progress_bar.update()
            else:
                for subvertex in subvertex_list:
                    self._allocate_individual(subvertex, placements,
                                              progress_bar,
                                              resource_tracker)
    progress_bar.end()
def __call__(self, partitioned_graph, machine):
    """ Place the subvertices of a partitioned graph, offering chips to
        the resource tracker in radial order from the machine's centre.

    :param partitioned_graph: the graph whose subvertices are placed
    :param machine: the machine to place onto
    :return: dict with key 'placements' holding the computed placements
    """
    # Reject any subvertex whose constraints this placer cannot honour
    utility_calls.check_algorithm_can_support_constraints(
        constrained_vertices=partitioned_graph.subvertices,
        supported_constraints=[
            PlacerRadialPlacementFromChipConstraint,
            TagAllocatorRequireIptagConstraint,
            TagAllocatorRequireReverseIptagConstraint,
            PlacerChipAndCoreConstraint],
        abstract_constraint_type=AbstractPlacerConstraint)

    placements = Placements()
    sorted_subverts = utility_calls.sort_objects_by_constraint_authority(
        partitioned_graph.subvertices)

    progress = ProgressBar(len(sorted_subverts), "Placing graph vertices")

    # Radial chip ordering biases placement towards the machine centre
    tracker = ResourceTracker(
        machine, self._generate_radial_chips(machine))
    for subvert in sorted_subverts:
        self._place_vertex(subvert, tracker, machine, placements)
        progress.update()
    progress.end()

    return {'placements': placements}
def __call__(self, partitioned_graph, machine):
    """ Place a partitioned_graph so that each subvertex is placed on a\
        core, first-come first-served

    :param partitioned_graph: The partitioned_graph to place
    :type partitioned_graph:\
        :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
    :param machine: the machine to place onto
    :return: A set of placements
    :rtype: :py:class:`pacman.model.placements.placements.Placements`
    :raise pacman.exceptions.PacmanPlaceException: If something\
        goes wrong with the placement
    """
    # Reject unsupported placer constraints up front
    utility_calls.check_algorithm_can_support_constraints(
        constrained_vertices=partitioned_graph.subvertices,
        supported_constraints=[PlacerChipAndCoreConstraint],
        abstract_constraint_type=AbstractPlacerConstraint)

    placements = Placements()
    sorted_subverts = utility_calls.sort_objects_by_constraint_authority(
        partitioned_graph.subvertices)

    progress = ProgressBar(len(sorted_subverts), "Placing graph vertices")

    tracker = ResourceTracker(machine)
    for subvert in sorted_subverts:
        # Let the tracker pick any chip/core satisfying the constraints,
        # then record the resulting placement
        (x, y, p, _, _) = tracker.allocate_constrained_resources(
            subvert.resources_required, subvert.constraints)
        placements.add_placement(Placement(subvert, x, y, p))
        progress.update()
    progress.end()

    return {'placements': placements}
def __call__(self, graph, machine):
    """ Partition a partitionable graph by fitting as many atoms as the
        first atom's resource usage suggests onto each core in turn.

    :param graph: the partitionable graph to split into subvertices
    :param machine: the machine the partitioned graph must fit on
    :return: dict with the partitioned graph and the graph mapper
    :raise pacman.exceptions.PacmanPartitionException: if a subvertex\
        cannot be created
    """
    utility_calls.check_algorithm_can_support_constraints(
        constrained_vertices=graph.vertices,
        supported_constraints=[PartitionerMaximumSizeConstraint],
        abstract_constraint_type=AbstractPartitionerConstraint)

    # start progress bar
    progress_bar = ProgressBar(len(graph.vertices),
                               "Partitioning graph vertices")

    vertices = graph.vertices
    subgraph = PartitionedGraph(
        label="partitioned_graph for partitionable_graph {}".format(
            graph.label))
    graph_to_subgraph_mapper = GraphMapper(graph.label, subgraph.label)
    resource_tracker = ResourceTracker(machine)

    # Partition one vertex at a time
    for vertex in vertices:

        # Estimate per-atom usage from the first atom, and assume every
        # atom of this vertex costs the same
        requirements = vertex.get_resources_used_by_atoms(
            Slice(0, 1), graph)

        # The best any chip can still offer under the constraints
        max_resources_available = \
            resource_tracker.get_maximum_constrained_resources_available(
                vertex.constraints)

        # How many atoms each resource alone would allow on one core
        # (a zero requirement yields the maximum available ratio)
        per_core_limits = [
            self._get_ratio(max_resources_available.sdram.get_value(),
                            requirements.sdram.get_value()),
            self._get_ratio(max_resources_available.dtcm.get_value(),
                            requirements.dtcm.get_value()),
            self._get_ratio(max_resources_available.cpu.get_value(),
                            requirements.cpu.get_value())]

        # User-imposed maximum-size constraints also cap the core size
        for constraint in utility_calls.locate_constraints_of_type(
                vertex.constraints, PartitionerMaximumSizeConstraint):
            per_core_limits.append(constraint.size)
        atoms_per_core = min(per_core_limits)

        # Carve the vertex into subvertices of at most atoms_per_core
        counted = 0
        while counted < vertex.n_atoms:
            alloc = min(vertex.n_atoms - counted, atoms_per_core)

            # sanity check kept from the original implementation
            if counted < 0 or counted + alloc - 1 < 0:
                raise PacmanPartitionException(
                    "Not enough resources available to create subvertex")

            last_atom = counted + (alloc - 1)
            vertex_slice = Slice(counted, last_atom)
            subvertex_usage = vertex.get_resources_used_by_atoms(
                vertex_slice, graph)
            subvert = vertex.create_subvertex(
                vertex_slice, subvertex_usage,
                "{}:{}:{}".format(vertex.label, counted, last_atom),
                partition_algorithm_utilities.get_remaining_constraints(
                    vertex))
            subgraph.add_subvertex(subvert)
            graph_to_subgraph_mapper.add_subvertex(
                subvert, vertex_slice, vertex)
            counted += alloc

            # record the usage so later vertices see reduced capacity
            resource_tracker.allocate_constrained_resources(
                subvertex_usage, vertex.constraints)

        # update and end progress bars as needed
        progress_bar.update()
    progress_bar.end()

    partition_algorithm_utilities.generate_sub_edges(
        subgraph, graph_to_subgraph_mapper, graph)

    return {'Partitioned_graph': subgraph,
            'Graph_mapper': graph_to_subgraph_mapper}
def __call__(self, machine, placements):
    """ see AbstractTagAllocatorAlgorithm.allocate_tags
    """
    resource_tracker = ResourceTracker(machine)

    progress_bar = ProgressBar(placements.n_placements, "Allocating tags")

    # First pass: validate the tag constraints and collect the
    # placements that actually request tags
    tagged_placements = list()
    for placement in placements.placements:
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=[placement.subvertex],
            supported_constraints=[
                TagAllocatorRequireIptagConstraint,
                TagAllocatorRequireReverseIptagConstraint],
            abstract_constraint_type=AbstractTagAllocatorConstraint)
        tag_constraints = utility_calls.locate_constraints_of_type(
            placement.subvertex.constraints,
            AbstractTagAllocatorConstraint)
        if len(tag_constraints):
            tagged_placements.append(placement)
        progress_bar.update()

    # Second pass: allocate the tags, first-come, first-served
    tags = Tags()
    for placement in tagged_placements:
        vertex = placement.subvertex

        # Get the constraint details for the tags
        (board_address, ip_tags, reverse_ip_tags) = \
            utility_calls.get_ip_tag_info(vertex.constraints)

        # Allocate using the fixed placement of the vertex and its
        # required resources
        (_, _, _, returned_ip_tags, returned_reverse_ip_tags) = \
            resource_tracker.allocate_resources(
                vertex.resources_required, [(placement.x, placement.y)],
                placement.p, board_address, ip_tags, reverse_ip_tags)

        # Record the allocated ip tags
        if returned_ip_tags is not None:
            for (tag_constraint, (board_address, tag)) in \
                    zip(ip_tags, returned_ip_tags):
                tags.add_ip_tag(
                    IPTag(board_address, tag, tag_constraint.ip_address,
                          tag_constraint.port, tag_constraint.strip_sdp),
                    vertex)

        # Record the allocated reverse ip tags
        if returned_reverse_ip_tags is not None:
            for (tag_constraint, (board_address, tag)) in \
                    zip(reverse_ip_tags, returned_reverse_ip_tags):
                tags.add_reverse_ip_tag(
                    ReverseIPTag(
                        board_address, tag, tag_constraint.port,
                        placement.x, placement.y, placement.p,
                        tag_constraint.sdp_port),
                    vertex)

    progress_bar.end()
    return {'tags': tags}
def __call__(self, machine, placements):
    """ see AbstractTagAllocatorAlgorithm.allocate_tags
    """
    tracker = ResourceTracker(machine)

    progress = ProgressBar(placements.n_placements, "Allocating tags")

    # Scan every placement: check its constraints are supported, and
    # keep the ones that declare tag requirements
    needs_tags = list()
    for current in placements.placements:
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=[current.subvertex],
            supported_constraints=[
                TagAllocatorRequireIptagConstraint,
                TagAllocatorRequireReverseIptagConstraint],
            abstract_constraint_type=AbstractTagAllocatorConstraint)
        found = utility_calls.locate_constraints_of_type(
            current.subvertex.constraints, AbstractTagAllocatorConstraint)
        if len(found):
            needs_tags.append(current)
        progress.update()

    # Allocate tags first-come, first-served for each such placement
    tags = Tags()
    for current in needs_tags:
        vertex = current.subvertex

        # Pull board address and tag requirements from the constraints
        (board_address, ip_tags, reverse_ip_tags) = \
            utility_calls.get_ip_tag_info(vertex.constraints)

        # The vertex is already placed, so allocate against exactly that
        # chip and processor
        chips = [(current.x, current.y)]
        (_, _, _, got_ip_tags, got_reverse_ip_tags) = \
            tracker.allocate_resources(
                vertex.resources_required, chips, current.p,
                board_address, ip_tags, reverse_ip_tags)

        # Store the allocated ip tag information
        if got_ip_tags is not None:
            for (tag_constraint, (board_address, tag)) in \
                    zip(ip_tags, got_ip_tags):
                ip_tag = IPTag(
                    board_address, tag, tag_constraint.ip_address,
                    tag_constraint.port, tag_constraint.strip_sdp)
                tags.add_ip_tag(ip_tag, vertex)

        # Store the allocated reverse ip tag information
        if got_reverse_ip_tags is not None:
            for (tag_constraint, (board_address, tag)) in \
                    zip(reverse_ip_tags, got_reverse_ip_tags):
                reverse_ip_tag = ReverseIPTag(
                    board_address, tag, tag_constraint.port,
                    current.x, current.y, current.p,
                    tag_constraint.sdp_port)
                tags.add_reverse_ip_tag(reverse_ip_tag, vertex)

    progress.end()
    return {'tags': tags}
def __call__(self, graph, machine):
    """ Partition a partitionable_graph so that each subvertex will fit\
        on a processor within the machine

    :param graph: The partitionable_graph to partition
    :type graph:\
        :py:class:`pacman.model.graph.partitionable_graph.PartitionableGraph`
    :param machine: The machine with respect to which to partition the\
        partitionable_graph
    :type machine: :py:class:`spinn_machine.machine.Machine`
    :return: A partitioned_graph of partitioned vertices and partitioned\
        edges, plus the graph mapper and the number of chips used
    :rtype: dict
    :raise pacman.exceptions.PacmanPartitionException: If something\
        goes wrong with the partitioning
    """
    utility_calls.check_algorithm_can_support_constraints(
        constrained_vertices=graph.vertices,
        abstract_constraint_type=AbstractPartitionerConstraint,
        supported_constraints=[PartitionerMaximumSizeConstraint,
                               PartitionerSameSizeAsVertexConstraint])

    # Build the empty subgraph and mapper that partitioning will fill
    subgraph = PartitionedGraph(
        label="partitioned graph for {}".format(graph.label))
    graph_mapper = GraphMapper(graph.label, subgraph.label)

    # Process vertices in constraint-authority order
    ordered_vertices = utility_calls.sort_objects_by_constraint_authority(
        graph.vertices)

    # The progress bar counts atoms, not vertices
    total_atoms = sum(vertex.n_atoms for vertex in ordered_vertices)
    progress_bar = ProgressBar(total_atoms, "Partitioning graph vertices")

    resource_tracker = ResourceTracker(machine)
    for vertex in ordered_vertices:

        # Skip any vertex that already has subvertices in the mapper;
        # partition the rest
        if graph_mapper.get_subvertices_from_vertex(vertex) is None:
            self._partition_vertex(
                vertex, subgraph, graph_mapper, resource_tracker, graph)
        progress_bar.update(vertex.n_atoms)
    progress_bar.end()

    partition_algorithm_utilities.generate_sub_edges(
        subgraph, graph_mapper, graph)

    return {'partitioned_graph': subgraph,
            'graph_mapper': graph_mapper,
            'nChips': len(resource_tracker.keys)}