Example #1
    def _allocate_key_for_partition(self, partition, vertex, placements,
                                    n_keys_map):
        """
        :param AbstractSingleSourcePartition partition:
        :param MachineVertex vertex:
        :param Placements placements:
        :param AbstractMachinePartitionNKeysMap n_keys_map:
        :rtype: PartitionRoutingInfo
        :raises PacmanRouteInfoAllocationException:
        """
        n_keys = n_keys_map.n_keys_for_partition(partition)
        if n_keys > MAX_KEYS_SUPPORTED:
            raise PacmanRouteInfoAllocationException(
                "This routing info allocator can only support up to {} keys "
                "for any given edge; cannot therefore allocate keys to {}, "
                "which is requesting {} keys".format(MAX_KEYS_SUPPORTED,
                                                     partition, n_keys))

        placement = placements.get_placement_of_vertex(vertex)
        if placement is None:
            raise PacmanRouteInfoAllocationException(
                "The vertex '{}' has no placement".format(vertex))

        keys_and_masks = [
            BaseKeyAndMask(base_key=self._get_key_from_placement(placement),
                           mask=MASK)]
        return PartitionRoutingInfo(keys_and_masks, partition)
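
The helpers used above are not shown in the example. A minimal sketch of plausible definitions, assuming an 11-bit payload per key and a key packed from the placement's chip and processor coordinates (both values are assumptions, not the library's confirmed ones):

    # Assumed module constants: 11 free payload bits per key
    MAX_KEYS_SUPPORTED = 2048      # 2 ** 11
    MASK = 0xFFFFF800              # top 21 bits fixed, low 11 bits free

    def _get_key_from_placement(self, placement):
        # Assumed packing: chip x, chip y and processor id occupy the
        # fixed (masked) part of the key
        return (placement.x << 24) | (placement.y << 16) | \
            (placement.p << 11)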
Example #2
    def __check_zones(self):
        # See if it could fit even before considering the fixed keys
        app_part_bits = self.__bits_needed(len(self.__atom_bits_per_app_part))
        if app_part_bits + self.__n_bits_atoms_and_mac > BITS_IN_KEY:
            raise PacmanRouteInfoAllocationException(
                "Unable to use ZonedRoutingInfoAllocator; please select a "
                "different allocator, as it needs {} + {} bits".format(
                    app_part_bits, self.__n_bits_atoms_and_mac))

        # Reserve fixed and check it still works
        self.__set_fixed_used()
        app_part_bits = self.__bits_needed(
            len(self.__atom_bits_per_app_part) + len(self.__fixed_used))
        if app_part_bits + self.__n_bits_atoms_and_mac > BITS_IN_KEY:
            raise PacmanRouteInfoAllocationException(
                "Unable to use ZonedRoutingInfoAllocator; please select a "
                "different allocator, as it needs {} + {} bits".format(
                    app_part_bits, self.__n_bits_atoms_and_mac))

        if not self.__flexible:
            # If using the global approach, see if the fixed M and X zones
            # are too big
            if app_part_bits + self.__n_bits_machine + self.__n_bits_atoms > \
                    BITS_IN_KEY:
                # We know from the test above that all will fit if flexible
                # Reduce the suggested size of n_bits_atoms
                self.__n_bits_atoms = \
                    BITS_IN_KEY - app_part_bits - self.__n_bits_machine
                self.__n_bits_atoms_and_mac = BITS_IN_KEY - app_part_bits
                logger.warning(
                    "The ZonedRoutingInfoAllocator could not use the global "
                    "approach for all vertexes.")
            else:
                # Set the atom and machine sizes to the biggest of each
                self.__n_bits_atoms_and_mac = \
                    self.__n_bits_machine + self.__n_bits_atoms
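
The __bits_needed helper is used above but not shown; a plausible reading (an assumption, not the library's confirmed code):

    def __bits_needed(self, n):
        # Key bits needed to enumerate n distinct values;
        # (n - 1).bit_length() == ceil(log2(n)) and gives 0 for n == 1
        return (n - 1).bit_length() if n > 0 else 0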
Example #3
    def __init__(self, fixed_mask, fields, free_space_list):

        self._fixed_mask = fixed_mask
        self._is_next_key = True
        self._free_space_list = free_space_list
        self._free_space_pos = 0
        self._next_key_read = False

        expanded_mask = utility_calls.expand_to_bit_array(fixed_mask)
        zeros = numpy.where(expanded_mask == 0)[0]
        self._n_mask_keys = 2 ** len(zeros)

        # If there are no fields, add the mask as a field
        the_fields = fields
        if not fields:
            n_ones = 32 - len(zeros)
            field_max = (2 ** n_ones) - 1
            the_fields = [Field(0, field_max, fixed_mask)]

        # Check that the fields don't cross each other
        for field in the_fields:
            for other_field in the_fields:
                if field != other_field and field.mask & other_field.mask != 0:
                    raise PacmanRouteInfoAllocationException(
                        "Field masks {} and {} overlap".format(
                            field.mask, other_field.mask))

        # Sort the fields by highest bit range first
        self._fields = sorted(the_fields, key=lambda field: field.value,
                              reverse=True)

        self._update_next_valid_fields()
        self._increment_space_until_valid_key()
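
This constructor leans on utility_calls.expand_to_bit_array; a self-contained stand-in with the behaviour the code above assumes (MSB-first expansion of a 32-bit word):

    import numpy

    def expand_to_bit_array(value):
        # Unpack a 32-bit word into an array of 32 bits, most
        # significant bit first
        return numpy.unpackbits(
            numpy.asarray([value], dtype=">u4").view(dtype="uint8"))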
Example #4
    def _allocate_keys_and_masks(self, fixed_mask, fields, partition_n_keys):
        # If there isn't a fixed mask, generate a fixed mask based on the
        # number of keys required
        masks_available = [fixed_mask]
        if fixed_mask is None:
            masks_available = self._get_possible_masks(partition_n_keys)

        # For each usable mask, try all of the possible keys and see if a
        # match is possible
        mask_found = None
        key_found = None
        mask = None
        for mask in masks_available:
            logger.debug("Trying mask {} for {} keys", hex(mask),
                         partition_n_keys)

            key_found = None
            for key in KeyFieldGenerator(mask, fields,
                                         self._free_space_tracker):
                logger.debug("Trying key {}", hex(key))

                # Check if all the key ranges can be allocated
                matched_all = True
                index = 0
                for (base_key, n_keys) in self._get_key_ranges(key, mask):
                    logger.debug("Finding slot for {}, n_keys={}",
                                 hex(base_key), n_keys)
                    index = self._find_slot(base_key, lo=index)
                    logger.debug("Slot for {} is {}", hex(base_key), index)
                    if index is None:
                        matched_all = False
                        break
                    space = self._check_allocation(index, base_key, n_keys)
                    logger.debug("Space for {} is {}", hex(base_key), space)
                    if space is None:
                        matched_all = False
                        break

                if matched_all:
                    logger.debug("Matched key {}", hex(key))
                    key_found = key
                    break

            # If we found a matching key, store the mask that worked
            if key_found is not None:
                logger.debug("Matched mask {}", hex(mask))
                mask_found = mask
                break

        # If we found a working key and mask that can be assigned,
        # allocate them. Otherwise raise an exception
        if key_found is None or mask_found is None:
            raise PacmanRouteInfoAllocationException(
                "Could not find space to allocate keys")

        for (base_key, n_keys) in self._get_key_ranges(key_found, mask_found):
            self._allocate_elements(base_key, n_keys)
        # If we get here, we can assign the keys to the edges
        return [BaseKeyAndMask(base_key=key_found, mask=mask_found)]
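
The _get_key_ranges helper is central here but not shown. A sketch under the assumption that the low contiguous zero bits of the mask set the block size, while any higher zero bits split the keyspace into separate blocks:

    import itertools

    def _get_key_ranges(self, key, mask):
        # Positions of the zero (free) bits in the 32-bit mask
        zero_bits = [i for i in range(32) if not (mask >> i) & 1]
        # Contiguous zeros at the bottom form a single block
        n_low = 0
        while n_low < len(zero_bits) and zero_bits[n_low] == n_low:
            n_low += 1
        block_size = 1 << n_low
        high_bits = zero_bits[n_low:]
        # One (base_key, n_keys) range per assignment of the remaining,
        # non-contiguous zero bits
        for bits in itertools.product((0, 1), repeat=len(high_bits)):
            base = key & mask
            for bit, pos in zip(bits, high_bits):
                base |= bit << pos
            yield base, block_size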
Example #5
    def _allocate_fixed_keys_and_masks(self, keys_and_masks, fixed_mask):
        # If there are fixed keys and masks, allocate them
        for key_and_mask in keys_and_masks:

            # If there is a fixed mask, check it doesn't clash
            if fixed_mask is not None and fixed_mask != key_and_mask.mask:
                raise PacmanRouteInfoAllocationException(
                    "Cannot meet conflicting constraints")

            # Go through the mask sets and allocate
            for key, n_keys in self._get_key_ranges(key_and_mask.key,
                                                    key_and_mask.mask):
                self._allocate_elements(key, n_keys)
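
A hypothetical call, pinning one partition to a fixed key (the key, mask and allocator name are illustrative only):

    fixed = [BaseKeyAndMask(base_key=0x1000, mask=0xFFFFF800)]
    allocator._allocate_fixed_keys_and_masks(fixed, fixed_mask=0xFFFFF800)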
Example #6
    def __init__(self, fixed_mask, fields, free_space_list):
        """
        :param int fixed_mask:
        :param fields:
        :type fields: list(Field) or None
        :param list(ElementFreeSpace) free_space_list:
        """

        self._fixed_mask = fixed_mask
        self._is_next_key = True
        self._free_space_list = free_space_list
        self._free_space_pos = 0
        self._next_key_read = False
        self._field_ones = dict()
        self._field_value = dict()

        expanded_mask = expand_to_bit_array(fixed_mask)
        zeros = numpy.where(expanded_mask == 0)[0]
        self._n_mask_keys = 2 ** len(zeros)

        # If there are no fields, add the mask as a field
        the_fields = fields
        if not fields:
            n_ones = 32 - len(zeros)
            field_max = (2 ** n_ones) - 1
            the_fields = [Field(0, field_max, fixed_mask)]

        # Check that the fields don't cross each other
        for idx, field in enumerate(the_fields):
            for other_field in the_fields[idx+1:]:
                if field != other_field and field.mask & other_field.mask != 0:
                    raise PacmanRouteInfoAllocationException(
                        "Field masks {} and {} overlap".format(
                            field.mask, other_field.mask))

        # Sort the fields by highest bit range first
        self._fields = sorted(the_fields, key=lambda field: field.value,
                              reverse=True)

        self._update_next_valid_fields()
        self._increment_space_until_valid_key()
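
The Field objects consumed above are assumed to look roughly like this (a stand-in, not the real pacman class):

    class Field(object):
        # A permitted value range plus the key bits the field occupies;
        # the sort above reads .value, defaulted here to the mask
        def __init__(self, lo, hi, mask, value=None):
            self.lo = lo
            self.hi = hi
            self.mask = mask
            self.value = mask if value is None else value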
Example #7
    def _allocate_fixed_keys_and_masks(self, keys_and_masks, fixed_mask):
        """ Allocate fixed keys and masks.

        :param iterable(BaseKeyAndMask) keys_and_masks:
            the fixed keys and masks combos
        :param fixed_mask: fixed mask
        :type fixed_mask: int or None
        :rtype: None
        :raises PacmanRouteInfoAllocationException:
        """
        # If there are fixed keys and masks, allocate them
        for key_and_mask in keys_and_masks:
            # If there is a fixed mask, check it doesn't clash
            if fixed_mask is not None and fixed_mask != key_and_mask.mask:
                raise PacmanRouteInfoAllocationException(
                    "Cannot meet conflicting constraints")

            # Go through the mask sets and allocate
            for key, n_keys in get_key_ranges(key_and_mask.key,
                                              key_and_mask.mask):
                self._allocate_elements(key, n_keys)
Example #8
def get_edge_groups(machine_graph, traffic_type):
    """ Utility method to get groups of edges using any\
        :py:class:`pacman.model.constraints.key_allocator_constraints.KeyAllocatorSameKeyConstraint`\
        constraints.  Note that no checking is done here about conflicts\
        related to other constraints.

    :param machine_graph: the machine graph
    :param traffic_type: the traffic type to group
    """

    # mapping between partition and shared key group it is in
    partition_groups = dict()

    # process the partitions one by one, merging shared-key groups as we go
    for vertex in machine_graph.vertices:
        for partition in machine_graph.\
                get_outgoing_edge_partitions_starting_at_vertex(vertex):

            # only process partitions of the correct traffic type
            if partition.traffic_type == traffic_type:

                # Get a set of partitions that should be grouped together
                shared_key_constraints = locate_constraints_of_type(
                    partition.constraints, ShareKeyConstraint)
                partitions_to_group = [partition]
                for constraint in shared_key_constraints:
                    partitions_to_group.extend(constraint.other_partitions)

                # Get a set of groups that should be grouped
                groups_to_group = [
                    partition_groups.get(part_to_group, [part_to_group])
                    for part_to_group in partitions_to_group
                ]

                # Group the groups
                new_group = ConstraintGroup(part for group in groups_to_group
                                            for part in group)
                partition_groups.update(
                    {part: new_group
                     for part in new_group})

    # Keep track of groups
    fixed_key_groups = list()
    shared_key_groups = list()
    fixed_mask_groups = list()
    fixed_field_groups = list()
    flexi_field_groups = list()
    continuous_groups = list()
    noncontinuous_groups = list()
    groups_by_type = {
        FixedKeyAndMaskConstraint: fixed_key_groups,
        FixedMaskConstraint: fixed_mask_groups,
        FixedKeyFieldConstraint: fixed_field_groups,
        FlexiKeyFieldConstraint: flexi_field_groups,
    }
    groups = set(itervalues(partition_groups))
    for group in groups:

        # Get all expected constraints in the group
        constraints = [
            constraint for partition in group
            for constraint in locate_constraints_of_type(
                partition.constraints, (FixedKeyAndMaskConstraint,
                                        FixedMaskConstraint,
                                        FlexiKeyFieldConstraint,
                                        FixedKeyFieldConstraint))
        ]

        # Check that the possibly conflicting constraints are equal
        if constraints and not all(constraint_a == constraint_b
                                   for constraint_a in constraints
                                   for constraint_b in constraints):
            raise PacmanRouteInfoAllocationException(
                "The group of partitions {} has conflicting constraints".
                format(constraints))

        # If no constraints, must be one of the non-specific groups
        if not constraints:
            # If the group has only one item, it is not shared
            if len(group) == 1:
                continuous_constraints = [
                    constraint for partition in group
                    for constraint in locate_constraints_of_type(
                        partition.constraints, ContiguousKeyRangeContraint)
                ]
                if continuous_constraints:
                    continuous_groups.append(group)
                else:
                    noncontinuous_groups.append(group)

            # If the group has more than one partition, it must be shared
            else:
                shared_key_groups.append(group)

        # If constraints found, put the group in the appropriate constraint
        # group
        else:
            group._set_constraint(constraints[0])
            constraint_type = type(constraints[0])
            groups_by_type[constraint_type].append(group)

    # return the set of groups
    return (fixed_key_groups, shared_key_groups, fixed_mask_groups,
            fixed_field_groups, flexi_field_groups, continuous_groups,
            noncontinuous_groups)
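
A hypothetical caller, unpacking the seven group lists (EdgeTrafficType.MULTICAST is assumed to be the multicast traffic marker):

    (fixed_key, shared_key, fixed_mask_g, fixed_field, flexi_field,
     continuous, noncontinuous) = get_edge_groups(
         machine_graph, EdgeTrafficType.MULTICAST)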
Example #9
def get_mulitcast_edge_groups(machine_graph):
    """ Utility method to get groups of multicast edges using any\
        :py:class:`KeyAllocatorSameKeyConstraint` constraints.  Note that no\
        checking is done here about conflicts related to other constraints.

    :param MachineGraph machine_graph: the machine graph
    :return: (fixed key groups, shared key groups, fixed mask groups,
        fixed field groups, continuous groups, noncontinuous groups)
    :rtype: tuple(list(ConstraintGroup), list(ConstraintGroup),
        list(ConstraintGroup), list(ConstraintGroup), list(ConstraintGroup),
        list(ConstraintGroup))
    """

    # mapping between partition and shared key group it is in
    partition_groups = dict()

    # process the partitions one by one, merging shared-key groups as we go
    for vertex in machine_graph.vertices:
        for partition in machine_graph.\
                get_multicast_edge_partitions_starting_at_vertex(vertex):

            # Get a set of partitions that should be grouped together
            shared_key_constraints = locate_constraints_of_type(
                partition.constraints, ShareKeyConstraint)
            partitions_to_group = [partition]
            for constraint in shared_key_constraints:
                partitions_to_group.extend(constraint.other_partitions)

            # Get a set of groups that should be grouped
            groups_to_group = [
                partition_groups.get(part_to_group, [part_to_group])
                for part_to_group in partitions_to_group]

            # Group the groups
            new_group = ConstraintGroup(
                part for group in groups_to_group for part in group)
            partition_groups.update(
                {part: new_group for part in new_group})

    # Keep track of groups
    fixed_key_groups = list()
    shared_key_groups = list()
    fixed_mask_groups = list()
    fixed_field_groups = list()
    continuous_groups = list()
    noncontinuous_groups = list()
    groups_by_type = {
        FixedKeyAndMaskConstraint: fixed_key_groups,
        FixedMaskConstraint: fixed_mask_groups,
        FixedKeyFieldConstraint: fixed_field_groups,
    }
    groups = OrderedSet(partition_groups.values())
    for group in groups:

        # Get all expected constraints in the group
        constraints = [
            constraint for partition in group
            for constraint in locate_constraints_of_type(
                partition.constraints, _ALL_FIXED_TYPES)]

        # Check that the possibly conflicting constraints are equal
        if constraints and not all(
                constraint_a == constraint_b for constraint_a in constraints
                for constraint_b in constraints):
            raise PacmanRouteInfoAllocationException(
                "The group of partitions {} has conflicting constraints"
                .format(constraints))

        # If constraints found, put the group in the appropriate constraint
        # group
        if constraints:
            group._set_constraint(constraints[0])
            constraint_type = type(constraints[0])
            groups_by_type[constraint_type].append(group)
        # If no constraints, must be one of the non-specific groups
        # If the group has only one item, it is not shared
        elif len(group) == 1:
            continuous_constraints = (
                constraint for partition in group
                for constraint in locate_constraints_of_type(
                    partition.constraints, ContiguousKeyRangeContraint))
            if any(continuous_constraints):
                continuous_groups.append(group)
            else:
                noncontinuous_groups.append(group)
        # If the group has more than one partition, it must be shared
        else:
            shared_key_groups.append(group)

    # return the set of groups
    return (fixed_key_groups, shared_key_groups, fixed_mask_groups,
            fixed_field_groups, continuous_groups, noncontinuous_groups)
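
Both grouping functions build ConstraintGroup objects; a minimal hashable stand-in (the real class's shape is an assumption):

    class ConstraintGroup(tuple):
        # The partitions that must share one allocation, plus the
        # constraint that binds them; tuple keeps the group hashable
        # so it can live in sets and as dict values
        _constraint = None

        @property
        def constraint(self):
            return self._constraint

        def _set_constraint(self, constraint):
            self._constraint = constraint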
Example #10
    def __call__(self, partitioned_graph, placements, n_keys_map):
        """
        Allocates routing information to the partitioned edges in a\
        partitioned graph

        :param partitioned_graph: The partitioned graph to allocate the \
                    routing info for
        :type partitioned_graph:\
                    :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
        :param placements: The placements of the subvertices
        :type placements:\
                    :py:class:`pacman.model.placements.placements.Placements`
        :param n_keys_map: A map between the partitioned edges and the number\
                    of keys required by the edges
        :type n_keys_map:\
                    :py:class:`pacman.model.routing_info.abstract_partitioned_edge_n_keys_map.AbstractPartitionedEdgeNKeysMap`
        :return: The routing information
        :rtype: :py:class:`pacman.model.routing_info.routing_info.RoutingInfo`,
                :py:class:`pacman.model.routing_tables.multicast_routing_table.MulticastRoutingTable`
        :raise pacman.exceptions.PacmanRouteInfoAllocationException: If\
                   something goes wrong with the allocation
        """

        # check that this algorithm supports the constraints put onto the
        # partitioned_edges
        supported_constraints = [KeyAllocatorContiguousRangeContraint]
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=partitioned_graph.partitions,
            supported_constraints=supported_constraints,
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # take each subedge and create keys from its placement
        progress_bar = ProgressBar(len(partitioned_graph.subvertices),
                                   "Allocating routing keys")
        routing_infos = RoutingInfo()
        for subvert in partitioned_graph.subvertices:
            partitions = partitioned_graph.\
                outgoing_edges_partitions_from_vertex(subvert)
            for partition in partitions.values():
                n_keys = n_keys_map.n_keys_for_partition(partition)
                if n_keys > MAX_KEYS_SUPPORTED:
                    raise PacmanRouteInfoAllocationException(
                        "This routing info allocator can only support up to {}"
                        " keys for any given subedge; cannot therefore"
                        " allocate keys to {}, which is requesting {} keys".
                        format(MAX_KEYS_SUPPORTED, partition, n_keys))
                placement = placements.get_placement_of_subvertex(subvert)
                if placement is not None:
                    key = self._get_key_from_placement(placement)
                    keys_and_masks = [BaseKeyAndMask(base_key=key, mask=MASK)]
                    subedge_routing_info = PartitionRoutingInfo(
                        keys_and_masks, partition)
                    routing_infos.add_partition_info(subedge_routing_info)
                else:
                    raise PacmanRouteInfoAllocationException(
                        "This subvertex '{}' has no placement! This should "
                        "never occur; please fix and try again.".format(
                            subvert))

            progress_bar.update()
        progress_bar.end()

        return {'routing_infos': routing_infos}
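
A hypothetical driver (the allocator class name is an assumption; the calling convention is the one described by the docstring above):

    allocator = BasicRoutingInfoAllocator()
    routing_infos = allocator(
        partitioned_graph, placements, n_keys_map)['routing_infos']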