def test_fixed_key_and_mask_constraint(self):
    """Round-trip FixedKeyAndMaskConstraint objects, both with one
    key/mask and with two."""
    single = FixedKeyAndMaskConstraint([BaseKeyAndMask(0xFF0, 0xFF8)])
    self.constraint_there_and_back(single)

    first = BaseKeyAndMask(0xFF0, 0xFF8)
    second = BaseKeyAndMask(0xFE0, 0xFF8)
    double = FixedKeyAndMaskConstraint([first, second])
    self.constraint_there_and_back(double)
def __init__(self, key_space):
    """
    :param key_space: keyspace object supplying per-tag values and masks
    """
    self._key_space = key_space
    # Initialise the base key/mask from the routing-tag portion of the
    # keyspace (explicit base-class call kept from the original design)
    BaseKeyAndMask.__init__(
        self,
        base_key=key_space.get_value(constants.ROUTING_TAG),
        mask=key_space.get_mask(constants.ROUTING_TAG))
    # Mask selecting only the neuron-index field of the key
    self._neuron_mask = key_space.get_mask(
        field=constants.INDEX_FIELD_ID)
def test_base_key_and_mask(self):
    """Validate BaseKeyAndMask construction, equality, repr and keys."""
    # A key carrying bits outside its mask must be rejected
    with self.assertRaises(PacmanConfigurationException):
        BaseKeyAndMask(0xF0, 0x40)

    km = BaseKeyAndMask(0x40, 0xF0)
    assert km == km
    assert km != []
    assert str(km) == "KeyAndMask:0x40:0xf0"
    # 28 unmasked bits -> 2**28 possible keys
    assert km.n_keys == 268435456

    km_two_keys = BaseKeyAndMask(0x40000000, FULL_MASK & ~1)
    assert km != km_two_keys
    assert km_two_keys.n_keys == 2
    keys, count = km_two_keys.get_keys()
    assert keys.tolist() == [1073741824, 1073741825]
    assert count == 2
def test_base_key_and_mask(self):
    """Validate BaseKeyAndMask construction, equality, repr and keys."""
    # A key carrying bits outside its mask must be rejected
    with self.assertRaises(PacmanConfigurationException):
        BaseKeyAndMask(0xF0, 0x40)

    km = BaseKeyAndMask(0x40, 0xF0)
    assert km == km
    assert km != []
    assert str(km) == "KeyAndMask:0x40:0xf0"
    # 28 unmasked bits -> 2**28 possible keys
    assert km.n_keys == 268435456

    km_two_keys = BaseKeyAndMask(0x40000000, _32_BITS & ~1)
    assert km != km_two_keys
    assert km_two_keys.n_keys == 2
    keys, count = km_two_keys.get_keys()
    assert keys.tolist() == [1073741824, 1073741825]
    assert count == 2
def test_share_key_with_fixed_key_on_new_partitions(self):
    """A ShareKeyConstraint should propagate the fixed key of the other
    partition, so all edges of both partitions carry the same key."""
    machine_graph, n_keys_map, v1, v2, _v3, _v4, e1, e2, e3, e4 = \
        self._integration_setup()

    first = machine_graph.get_outgoing_edge_partition_starting_at_vertex(
        v1, "part1")
    second = machine_graph.get_outgoing_edge_partition_starting_at_vertex(
        v2, "part2")
    first.add_constraint(ShareKeyConstraint([second]))
    second.add_constraint(FixedKeyAndMaskConstraint(
        [BaseKeyAndMask(base_key=25, mask=0xFFFFFFF)]))

    results = malloc_based_routing_info_allocator(
        machine_graph, n_keys_map)

    key = results.get_first_key_from_partition(
        machine_graph.get_outgoing_edge_partition_starting_at_vertex(
            v1, "part1"))
    self.assertEqual(key, 25)
    # Edges 1-3 are in the shared key space; edge 4 is unrelated
    self.assertEqual(results.get_first_key_for_edge(e1), key)
    self.assertEqual(results.get_first_key_for_edge(e2), key)
    self.assertEqual(results.get_first_key_for_edge(e3), key)
    self.assertNotEqual(results.get_first_key_for_edge(e4), key)
def test_allocate_mixed_keys(self):
    """Interleave fixed-mask and free allocations, then verify the
    remaining free-space regions."""
    fixed_masks = [None, None, 0xFFFFFF00, 0xFFFFF800]
    n_keys = [200, 20, 20, 256]

    allocator = _MallocBasedRoutingInfoAllocator(None)
    allocator._allocate_fixed_keys_and_masks(
        [BaseKeyAndMask(0x800, 0xFFFFF800)], None)
    print(allocator._free_space_tracker)

    for mask, keys in zip(fixed_masks, n_keys):
        self._print_keys_and_masks(
            allocator._allocate_keys_and_masks(mask, None, keys))
        print(allocator._free_space_tracker)
    print(allocator._free_space_tracker)

    error = ("Allocation has not resulted in the expected free space"
             " being available")
    tracker = allocator._free_space_tracker
    self.assertEqual(len(tracker), 3, error)
    # (start address, size) of each expected free-space slot
    expected = [
        (0x120, 224),
        (0x300, 1280),
        (0x1800, 0x100000000 - 0x1800),
    ]
    for slot, (start, size) in zip(tracker, expected):
        self.assertEqual(slot.start_address, start, error)
        self.assertEqual(slot.size, size, error)
def get_outgoing_partition_constraints(self, partition):
    """Fix the outgoing key to the one registered for this partition."""
    fixed_key = self._partition_id_to_key[partition.identifier]
    return [FixedKeyAndMaskConstraint(
        [BaseKeyAndMask(fixed_key, self._DEFAULT_COMMAND_MASK)])]
def get_outgoing_partition_constraints(self, partition):
    """Constrain outgoing keys to the configured virtual key, when set;
    otherwise impose no constraints."""
    if self._virtual_key is None:
        return []
    return [FixedKeyAndMaskConstraint(
        [BaseKeyAndMask(self._virtual_key, self._mask)])]
def _allocate_key_for_partition(self, partition, vertex, placements,
                                n_keys_map):
    """ Derive routing info for one partition from its vertex placement.

    :param AbstractSingleSourcePartition partition:
    :param MachineVertex vertex:
    :param Placements placements:
    :param AbstractMachinePartitionNKeysMap n_keys_map:
    :rtype: PartitionRoutingInfo
    :raises PacmanRouteInfoAllocationException:
        if too many keys are requested or the vertex is unplaced
    """
    n_keys = n_keys_map.n_keys_for_partition(partition)
    if n_keys > MAX_KEYS_SUPPORTED:
        raise PacmanRouteInfoAllocationException(
            "This routing info allocator can only support up to {} keys "
            "for any given edge; cannot therefore allocate keys to {}, "
            "which is requesting {} keys".format(
                MAX_KEYS_SUPPORTED, partition, n_keys))

    placement = placements.get_placement_of_vertex(vertex)
    if placement is None:
        raise PacmanRouteInfoAllocationException(
            "The vertex '{}' has no placement".format(vertex))

    # The key is entirely determined by where the vertex was placed
    keys_and_masks = [BaseKeyAndMask(
        base_key=self._get_key_from_placement(placement), mask=MASK)]
    return PartitionRoutingInfo(keys_and_masks, partition)
def _allocate_keys_and_masks(self, fixed_mask, fields, partition_n_keys):
    """ Find and allocate a key/mask combination that satisfies the
    request, searching candidate masks and keys against free space.

    :param fixed_mask: the mask to use, or None to derive candidates
        from the number of keys required
    :param fields: optional key field definitions passed to
        KeyFieldGenerator
    :param partition_n_keys: number of contiguous keys required
    :return: a single-element list holding the allocated BaseKeyAndMask
    :raises PacmanRouteInfoAllocationException: if no key/mask fits in
        the remaining free space
    """
    # If there isn't a fixed mask, generate a fixed mask based on the
    # number of keys required
    masks_available = [fixed_mask]
    if fixed_mask is None:
        masks_available = self._get_possible_masks(partition_n_keys)

    # For each usable mask, try all of the possible keys and see if a
    # match is possible
    mask_found = None
    key_found = None
    mask = None
    for mask in masks_available:
        logger.debug("Trying mask {} for {} keys", hex(mask),
                     partition_n_keys)

        key_found = None
        for key in KeyFieldGenerator(
                mask, fields, self._free_space_tracker):
            logger.debug("Trying key {}", hex(key))

            # Check if all the key ranges can be allocated
            matched_all = True
            index = 0
            for (base_key, n_keys) in self._get_key_ranges(key, mask):
                logger.debug("Finding slot for {}, n_keys={}",
                             hex(base_key), n_keys)
                # Search resumes from the previous slot index since key
                # ranges are generated in ascending order
                index = self._find_slot(base_key, lo=index)
                logger.debug("Slot for {} is {}", hex(base_key), index)
                if index is None:
                    matched_all = False
                    break
                space = self._check_allocation(index, base_key, n_keys)
                logger.debug("Space for {} is {}", hex(base_key), space)
                if space is None:
                    matched_all = False
                    break

            if matched_all:
                logger.debug("Matched key {}", hex(key))
                key_found = key
                break

        # If we found a matching key, store the mask that worked
        if key_found is not None:
            logger.debug("Matched mask {}", hex(mask))
            mask_found = mask
            break

    # If we found a working key and mask that can be assigned,
    # allocate them.  Otherwise raise an exception
    if key_found is None or mask_found is None:
        raise PacmanRouteInfoAllocationException(
            "Could not find space to allocate keys")

    # NOTE: `mask` equals `mask_found` here, because the outer loop
    # breaks as soon as a mask matches
    for (base_key, n_keys) in self._get_key_ranges(key_found, mask):
        self._allocate_elements(base_key, n_keys)

    # If we get here, we can assign the keys to the edges
    return [BaseKeyAndMask(base_key=key_found, mask=mask)]
def get_outgoing_partition_constraints(self, partition):
    """Fix the outgoing key space to base key 0.

    The mask 0xFFFFFF00 leaves the bottom 8 bits free, i.e. 256 neuron
    IDs in the least-significant byte of the key.
    """
    return [FixedKeyAndMaskConstraint(keys_and_masks=[
        BaseKeyAndMask(base_key=0, mask=0xFFFFFF00)])]
def get_outgoing_partition_constraints(self, partition):
    """Fix the outgoing key space to base key 0 under MASK_IN.

    The masked-out low bits of the key carry the neuron ID.
    """
    return [FixedKeyAndMaskConstraint(keys_and_masks=[
        BaseKeyAndMask(base_key=0, mask=MASK_IN)])]
def test_share_key_with_conflicting_fixed_key_on_partitions(self):
    """Two partitions forced to share a key but fixed to different keys
    must make allocation fail."""
    machine_graph, n_keys_map, v1, v2, _v3, _v4, _e1, _e2, _e3, _e4 = \
        self._integration_setup()

    first = machine_graph.get_outgoing_edge_partition_starting_at_vertex(
        v1, "part1")
    second = machine_graph.get_outgoing_edge_partition_starting_at_vertex(
        v2, "part2")
    second.add_constraint(ShareKeyConstraint([first]))
    # Conflicting fixed keys: 30 vs 25 under the same mask
    second.add_constraint(FixedKeyAndMaskConstraint(
        [BaseKeyAndMask(base_key=30, mask=0xFFFFFFF)]))
    first.add_constraint(FixedKeyAndMaskConstraint(
        [BaseKeyAndMask(base_key=25, mask=0xFFFFFFF)]))

    with self.assertRaises(PacmanRouteInfoAllocationException):
        malloc_based_routing_info_allocator(machine_graph, n_keys_map)
def get_incoming_partition_constraints(self, partition, graph_mapper):
    """Give each non-command pre-vertex a fixed key derived from its
    machine-vertex index; command senders get no constraint."""
    pre_vertex = partition.pre_vertex
    if isinstance(pre_vertex, CommandSenderMachineVertex):
        return []
    index = graph_mapper.get_machine_vertex_index(pre_vertex)
    vertex_slice = graph_mapper.get_slice(pre_vertex)
    # First candidate mask able to cover every atom of the slice
    mask = get_possible_masks(vertex_slice.n_atoms)[0]
    key = (0x1000 + index) << 16
    return [FixedKeyAndMaskConstraint(
        keys_and_masks=[BaseKeyAndMask(key, mask)])]
def get_outgoing_partition_constraints(self, partition):
    """Fix the outgoing key from the filter base key plus this row."""
    row_key = app_constants.FILTER_BASE_KEY | (
        self._row_id << app_constants.RETINA_Y_BIT_SHIFT)
    return [FixedKeyAndMaskConstraint(keys_and_masks=[
        BaseKeyAndMask(base_key=row_key,
                       mask=app_constants.FILTER_BASE_MASK)])]
def get_keys(self, key_array=None, offset=0, n_keys=None):
    """ Get the ordered list of keys that the combination allows

    :param key_array: \
        Optional array into which the returned keys will be placed
    :type key_array: array-like of int
    :param offset: \
        Optional offset into the array at which to start placing keys
    :type offset: int
    :param n_keys: \
        Optional limit on the number of keys returned. If less than this\
        number of keys are available, only the keys available will be added
    :type n_keys: int
    :return: A tuple of an array of keys and the number of keys added to\
        the array
    :rtype: tuple(array-like of int, int)
    """
    if self._key_space.user != constants.USER_FIELDS.NENGO.value:
        # BUG FIX: this unbound call previously omitted ``self``, which
        # bound ``key_array`` as the instance and shifted every argument
        return BaseKeyAndMask.get_keys(self, key_array, offset, n_keys)
    else:
        # Get the position of the zeros in the mask - assume 32-bits
        unwrapped_mask = numpy.unpackbits(
            numpy.asarray([self._mask], dtype=">u4").view(dtype="uint8"))
        zeros = numpy.where(unwrapped_mask == 0)[0]

        # If there are no zeros, there is only one key in the range, so
        # return that
        if len(zeros) == 0:
            if key_array is None:
                key_array = numpy.zeros(1, dtype=">u4")
            key_array[offset] = self._base_key
            return key_array, 1

        # We now know how many values there are - 2^len(zeros)
        max_n_keys = 2**len(zeros)
        if key_array is not None and len(key_array) < max_n_keys:
            max_n_keys = len(key_array)
        if n_keys is None or n_keys > max_n_keys:
            n_keys = max_n_keys
        if key_array is None:
            key_array = numpy.zeros(n_keys, dtype=">u4")

        # get keys by asking the keyspace for each index in turn
        keys = list()
        for index in range(0, n_keys):
            args = {constants.INDEX_FIELD_ID: index}
            keys.append(self._key_space(**args).get_value())

        # for each key, create its key with the idea of a neuron ID being
        # continuous and live at an offset position from the bottom of
        # the key
        for index, key in enumerate(keys):
            key_array[index + offset] = key
        return key_array, n_keys
def get_incoming_partition_constraints(self, partition):
    """Fixed retina-row key for the configured partition; no constraint
    for any other partition."""
    if partition.identifier != self._partition_identifier:
        return []
    row_key = app_constants.RETINA_BASE_KEY | (
        self._row_id << app_constants.RETINA_Y_BIT_SHIFT)
    return [FixedKeyAndMaskConstraint(keys_and_masks=[
        BaseKeyAndMask(base_key=row_key,
                       mask=app_constants.FILTER_BASE_MASK)])]
def test_fixed_key_and_mask_constraint(self):
    """Check equality, hashing, repr and accessors of the constraint."""
    km = BaseKeyAndMask(0xFF0, 0xFF8)
    c1 = FixedKeyAndMaskConstraint([BaseKeyAndMask(0xFF0, 0xFF8)])
    c2 = FixedKeyAndMaskConstraint([km])
    c3 = FixedKeyAndMaskConstraint([BaseKeyAndMask(0xFE0, 0xFF8)])
    c4 = FixedKeyAndMaskConstraint([km, BaseKeyAndMask(0xFE0, 0xFF8)])

    self.assertEqual(c1, c2)
    self.assertIsNone(c1.key_list_function)
    self.assertEqual(c1.keys_and_masks, [km])
    expected_repr = (
        "FixedKeyAndMaskConstraint(keys_and_masks=[KeyAndMask:0xff0:"
        "0xff8], key_list_function=None)")
    self.assertEqual(str(c1), expected_repr)

    # Equal constraints collide as dict keys; unequal ones do not
    lookup = {}
    lookup[c1] = 1
    lookup[c2] = 2
    lookup[c3] = 3
    self.assertEqual(len(lookup), 2)
    self.assertEqual(lookup[c1], 2)

    # A constraint with an extra key/mask differs in both directions
    self.assertNotEqual(c4, c1)
    self.assertNotEqual(c1, c4)
def get_outgoing_partition_constraints(self, partition):
    """Map each known partition identifier to its fixed key; reject
    identifiers that are not defined."""
    identifier = partition.identifier
    if identifier == app_constants.EDGE_PARTITION_MAIN_TO_FILTER:
        fixed_key = app_constants.MAIN_PARTICLE_ROI_KEY
    elif identifier == app_constants.EDGE_PARTITION_TARGET_POSITION:
        fixed_key = app_constants.MAIN_PARTICLE_TARGET_KEY
    elif identifier == app_constants.EDGE_PARTITION_PARTICLE_TO_PARTICLE:
        # Particle-to-particle traffic carries no fixed key
        return []
    else:
        raise Exception("Asking for a partition not defined")
    return [FixedKeyAndMaskConstraint(keys_and_masks=[
        BaseKeyAndMask(base_key=fixed_key,
                       mask=app_constants.MESSAGE_TYPE_MASK)])]
def test_allocate_fixed_key_and_mask(self):
    """Allocating one fixed key/mask should split free space into the
    region below the keys and the region above them."""
    allocator = MallocBasedRoutingInfoAllocator()
    allocator._allocate_fixed_keys_and_masks(
        [BaseKeyAndMask(0x800, 0xFFFFF800)], None)

    error = ("Allocation has not resulted in the expected free space"
             " being available")
    print(allocator._free_space_tracker)

    tracker = allocator._free_space_tracker
    self.assertEqual(len(tracker), 2, error)
    # Free space below the allocated range
    self.assertEqual(tracker[0].start_address, 0, error)
    self.assertEqual(tracker[0].size, 2048, error)
    # Free space above the allocated range
    self.assertEqual(tracker[1].start_address, 0x1000, error)
    self.assertEqual(tracker[1].size, 0xFFFFF000, error)
def __call__(self, machine_graph):
    """Pin fixed key/mask constraints onto multicast partitions whose
    pre-vertex is a known type, keyed on which slice the vertex covers."""
    for outgoing_partition in machine_graph.outgoing_edge_partitions:
        if not isinstance(outgoing_partition, MulticastEdgePartition):
            continue
        mac_vertex = outgoing_partition.pre_vertex
        is_first_slice = mac_vertex.vertex_slice.lo_atom == 0
        if isinstance(
                mac_vertex, ReverseIPTagMulticastSourceMachineVertex):
            # Sources occupy keys 0-63 and 64-127
            base = 0 if is_first_slice else 64
        elif isinstance(mac_vertex, DelayExtensionMachineVertex):
            # Delay extensions occupy keys 128-191 and 192-255
            base = 128 if is_first_slice else 192
        else:
            continue
        outgoing_partition.add_constraint(FixedKeyAndMaskConstraint(
            [BaseKeyAndMask(base_key=base, mask=0xFFFFFFc0)]))
def _allocate_partition_route(self, edge, placements, graph, n_keys_map):
    """Build routing info for an edge from the placement of its
    destination vertex, rejecting over-large key requests."""
    placement = placements.get_placement_of_vertex(edge.post_vertex)
    keys_and_masks = [BaseKeyAndMask(
        base_key=self._get_key_from_placement(placement),
        mask=self.MASK)]

    partition = graph.get_outgoing_edge_partition_starting_at_vertex(
        edge.pre_vertex)
    n_keys = n_keys_map.n_keys_for_partition(partition)
    if n_keys > self.MAX_KEYS_SUPPORTED:
        raise PacmanConfigurationException(
            "Only edges which require less than {} keys are"
            " supported".format(self.MAX_KEYS_SUPPORTED))
    return PartitionRoutingInfo(keys_and_masks, edge)
def __allocate(self):
    """ Allocate keys and masks to every multicast partition, giving
    each (application vertex, partition name) pair its own key zone.

    :rtype: RoutingInfo
    """
    multicast_partitions = self.__machine_graph.multicast_partitions
    progress = ProgressBar(
        len(multicast_partitions), "Allocating routing keys")
    routing_infos = RoutingInfo()
    app_part_index = 0
    for app_id in progress.over(multicast_partitions):
        # Skip zone indices already claimed by fixed-key partitions
        while app_part_index in self.__fixed_used:
            app_part_index += 1
        for partition_name, paritition_vertices in \
                multicast_partitions[app_id].items():
            # convert set to a list and sort by slice
            machine_vertices = list(paritition_vertices)
            machine_vertices.sort(key=lambda x: x.vertex_slice.lo_atom)
            n_bits_atoms = self.__atom_bits_per_app_part[
                (app_id, partition_name)]
            if self.__flexible:
                # Flexible: atoms take only what they need; the rest of
                # the machine+atom budget goes to machine-vertex bits
                n_bits_machine = \
                    self.__n_bits_atoms_and_mac - n_bits_atoms
            else:
                if n_bits_atoms <= self.__n_bits_atoms:
                    # Ok it fits use global sizes
                    n_bits_atoms = self.__n_bits_atoms
                    n_bits_machine = self.__n_bits_machine
                else:
                    # Nope need more bits! Use the flexible approach here
                    n_bits_machine = \
                        self.__n_bits_atoms_and_mac - n_bits_atoms

            for machine_index, vertex in enumerate(machine_vertices):
                partition = self.__machine_graph.\
                    get_outgoing_edge_partition_starting_at_vertex(
                        vertex, partition_name)
                if partition in self.__fixed_partitions:
                    # Ignore zone calculations and just use fixed
                    keys_and_masks = self.__fixed_partitions[partition]
                else:
                    mask = self.__mask(n_bits_atoms)
                    # Key layout (MSB to LSB):
                    # | app_part_index | machine_index | atom bits (0) |
                    key = app_part_index
                    key = (key << n_bits_machine) | machine_index
                    key = key << n_bits_atoms
                    keys_and_masks = [
                        BaseKeyAndMask(base_key=key, mask=mask)]
                routing_infos.add_partition_info(
                    PartitionRoutingInfo(keys_and_masks, partition))
            app_part_index += 1

    return routing_infos
def generate_constraints(self, machine_vertex, partition_identifier):
    """ Returns the fixed keys for the outgoing edge partition.

    :param machine_vertex: the vertex the partition starts at
    :param partition_identifier: name of the outgoing partition
    :return: a single-element list with a FixedKeyAndMaskConstraint
        covering one key/mask per key the vertex will send
    :raises KeyError: if the partition identifier is unknown
    """
    partition = self._get_partition(partition_identifier)
    if partition is None:
        # Fixed typo in the error message ("parition" -> "partition")
        raise KeyError("I've never heard of partition: {}".format(
            partition_identifier))

    base_key = partition.get_first_key(machine_vertex)
    n_keys = machine_vertex.get_n_keys_for_partition(
        partition_identifier)
    # One key/mask per key in the contiguous range for this vertex
    keys_and_masks = [BaseKeyAndMask(key, globals.mask)
                      for key in range(base_key, base_key + n_keys)]
    return [FixedKeyAndMaskConstraint(keys_and_masks)]
def get_outgoing_partition_constraints(self, partition):
    """Pin all outgoing traffic to the fixed 0x6789xxxx key space."""
    key_and_mask = BaseKeyAndMask(0x67890000, 0xFFFF0000)
    return [FixedKeyAndMaskConstraint([key_and_mask])]
def get_outgoing_partition_constraints(self, partition):
    """Constrain outgoing keys to this vertex's configured key/mask."""
    key_and_mask = BaseKeyAndMask(self._fixed_key, self._fixed_mask)
    return [FixedKeyAndMaskConstraint([key_and_mask])]
def test_routing_info(self):
    """Exercise RoutingInfo lookups by partition, pre-vertex and edge,
    including duplicate-registration errors and multi-key partitions."""
    partition = MachineOutgoingEdgePartition("Test")
    pre_vertex = SimpleMachineVertex(resources=ResourceContainer())
    post_vertex = SimpleMachineVertex(resources=ResourceContainer())
    edge = MachineEdge(pre_vertex, post_vertex)
    key = 12345
    partition_info = PartitionRoutingInfo(
        [BaseKeyAndMask(key, _32_BITS)], partition)
    partition.add_edge(edge)
    routing_info = RoutingInfo([partition_info])

    # Registering the same info twice is an error
    with self.assertRaises(PacmanAlreadyExistsException):
        routing_info.add_partition_info(partition_info)

    # Lookups by partition
    assert routing_info.get_first_key_from_partition(partition) == key
    assert routing_info.get_first_key_from_partition(None) is None
    assert routing_info.get_routing_info_from_partition(partition) == \
        partition_info
    assert routing_info.get_routing_info_from_partition(None) is None

    # Lookups by (pre-vertex, partition name)
    assert routing_info.get_routing_info_from_pre_vertex(
        pre_vertex, "Test") == partition_info
    assert routing_info.get_routing_info_from_pre_vertex(
        post_vertex, "Test") is None
    assert routing_info.get_routing_info_from_pre_vertex(
        pre_vertex, "None") is None
    assert routing_info.get_first_key_from_pre_vertex(
        pre_vertex, "Test") == key
    assert routing_info.get_first_key_from_pre_vertex(
        post_vertex, "Test") is None
    assert routing_info.get_first_key_from_pre_vertex(
        pre_vertex, "None") is None

    # Lookups by edge
    assert routing_info.get_routing_info_for_edge(edge) == partition_info
    assert routing_info.get_routing_info_for_edge(None) is None
    assert routing_info.get_first_key_for_edge(edge) == key
    assert routing_info.get_first_key_for_edge(None) is None

    assert next(iter(routing_info)) == partition_info

    # A second partition with the same name also collides
    partition2 = MachineOutgoingEdgePartition("Test")
    partition2.add_edge(MachineEdge(pre_vertex, post_vertex))
    with self.assertRaises(PacmanAlreadyExistsException):
        routing_info.add_partition_info(
            PartitionRoutingInfo(
                [BaseKeyAndMask(key, _32_BITS)], partition2))
    assert partition != partition2

    # A differently-named partition is accepted and kept distinct
    partition3 = MachineOutgoingEdgePartition("Test2")
    partition3.add_edge(MachineEdge(pre_vertex, post_vertex))
    routing_info.add_partition_info(
        PartitionRoutingInfo(
            [BaseKeyAndMask(key, _32_BITS)], partition3))
    assert routing_info.get_routing_info_from_partition(partition) != \
        routing_info.get_routing_info_from_partition(partition3)
    assert partition != partition3
    assert routing_info.get_routing_info_from_partition(
        partition3).get_keys().tolist() == [key]

    # A partition may carry more than one key/mask
    partition3 = MachineOutgoingEdgePartition("Test3")
    partition3.add_edge(MachineEdge(pre_vertex, post_vertex))
    routing_info.add_partition_info(
        PartitionRoutingInfo([
            BaseKeyAndMask(key, _32_BITS),
            BaseKeyAndMask(key * 2, _32_BITS)
        ], partition3))
    assert routing_info.get_routing_info_from_partition(
        partition3).get_keys().tolist() == [key, key * 2]
def test_routing_info(self):
    """Exercise RoutingInfo lookups by partition, pre-vertex and edge
    for MulticastEdgePartition, including duplicate registration."""
    # mock to avoid having to create a graph for this test
    graph_code = 123
    pre_vertex = SimpleMachineVertex(resources=ResourceContainer())
    partition = MulticastEdgePartition(pre_vertex, "Test")
    partition.register_graph_code(graph_code)  # This is a hack
    post_vertex = SimpleMachineVertex(resources=ResourceContainer())
    edge = MachineEdge(pre_vertex, post_vertex)
    key = 12345
    partition_info = PartitionRoutingInfo(
        [BaseKeyAndMask(key, FULL_MASK)], partition)
    partition.add_edge(edge, graph_code)
    routing_info = RoutingInfo([partition_info])

    # Registering the same info twice is an error
    with self.assertRaises(PacmanAlreadyExistsException):
        routing_info.add_partition_info(partition_info)

    # Lookups by partition
    assert routing_info.get_first_key_from_partition(partition) == key
    assert routing_info.get_first_key_from_partition(None) is None
    assert routing_info.get_routing_info_from_partition(partition) == \
        partition_info
    assert routing_info.get_routing_info_from_partition(None) is None

    # Lookups by (pre-vertex, partition name)
    assert routing_info.get_routing_info_from_pre_vertex(
        pre_vertex, "Test") == partition_info
    assert routing_info.get_routing_info_from_pre_vertex(
        post_vertex, "Test") is None
    assert routing_info.get_routing_info_from_pre_vertex(
        pre_vertex, "None") is None
    assert routing_info.get_first_key_from_pre_vertex(
        pre_vertex, "Test") == key
    assert routing_info.get_first_key_from_pre_vertex(
        post_vertex, "Test") is None
    assert routing_info.get_first_key_from_pre_vertex(
        pre_vertex, "None") is None

    # Lookups by edge
    assert routing_info.get_routing_info_for_edge(edge) == partition_info
    assert routing_info.get_routing_info_for_edge(None) is None
    assert routing_info.get_first_key_for_edge(edge) == key
    assert routing_info.get_first_key_for_edge(None) is None

    assert next(iter(routing_info)) == partition_info

    # A second partition with the same name also collides
    partition2 = MulticastEdgePartition(pre_vertex, "Test")
    partition2.register_graph_code(graph_code)  # This is a hack
    partition2.add_edge(MachineEdge(pre_vertex, post_vertex), graph_code)
    with self.assertRaises(PacmanAlreadyExistsException):
        routing_info.add_partition_info(
            PartitionRoutingInfo(
                [BaseKeyAndMask(key, FULL_MASK)], partition2))
    assert partition != partition2

    # A differently-named partition is accepted and kept distinct
    partition3 = MulticastEdgePartition(pre_vertex, "Test2")
    partition3.register_graph_code(graph_code)  # This is a hack
    partition3.add_edge(MachineEdge(pre_vertex, post_vertex), graph_code)
    routing_info.add_partition_info(
        PartitionRoutingInfo(
            [BaseKeyAndMask(key, FULL_MASK)], partition3))
    assert routing_info.get_routing_info_from_partition(partition) != \
        routing_info.get_routing_info_from_partition(partition3)
    assert partition != partition3
    assert routing_info.get_routing_info_from_partition(
        partition3).get_keys().tolist() == [key]

    # A partition may carry more than one key/mask
    partition4 = MulticastEdgePartition(pre_vertex, "Test4")
    partition4.register_graph_code(graph_code)  # This is a hack
    partition4.add_edge(MachineEdge(pre_vertex, post_vertex), graph_code)
    routing_info.add_partition_info(
        PartitionRoutingInfo([
            BaseKeyAndMask(key, FULL_MASK),
            BaseKeyAndMask(key * 2, FULL_MASK)
        ], partition4))
    assert routing_info.get_routing_info_from_partition(
        partition4).get_keys().tolist() == [key, key * 2]
def key_and_mask(self):
    """ Convenience method to get the key and mask as an object

    :rtype: BaseKeyAndMask
    """
    return BaseKeyAndMask(self.app_key, self.app_mask)
def key_mask_from_json(json_dict):
    """ Rebuild a BaseKeyAndMask from its JSON dictionary form.

    :param json_dict: dict with integer "key" and "mask" entries
    :rtype: BaseKeyAndMask
    """
    return BaseKeyAndMask(json_dict["key"], json_dict["mask"])
def test_write_synaptic_matrix_and_master_population_table(self):
    """End-to-end check: build a small projection graph, write the
    synaptic matrix and master population table via a data spec,
    execute the spec and read the synapses back for each connector."""
    MockSimulator.setup()

    # Load the default config and restrict the direct-matrix budget so
    # the one-to-one connections fall back to indirect matrices
    default_config_paths = os.path.join(
        os.path.dirname(abstract_spinnaker_common.__file__),
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME)
    config = conf_loader.load_config(
        AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths)
    config.set("Simulation", "one_to_one_connection_dtcm_max_bytes", 40)

    machine_time_step = 1000.0

    # 10-atom pre and post populations, each on one machine vertex
    pre_app_vertex = SimpleApplicationVertex(10)
    pre_vertex = SimpleMachineVertex(resources=None)
    pre_vertex_slice = Slice(0, 9)
    post_app_vertex = SimpleApplicationVertex(10)
    post_vertex = SimpleMachineVertex(resources=None)
    post_vertex_slice = Slice(0, 9)
    post_slice_index = 0

    # Three connectors with distinct weights/delays so read-back can be
    # matched to the right matrix
    one_to_one_connector_1 = OneToOneConnector(None)
    one_to_one_connector_1.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    one_to_one_connector_1.set_weights_and_delays(1.5, 1.0)
    one_to_one_connector_2 = OneToOneConnector(None)
    one_to_one_connector_2.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    one_to_one_connector_2.set_weights_and_delays(2.5, 2.0)
    all_to_all_connector = AllToAllConnector(None)
    all_to_all_connector.set_projection_information(
        pre_app_vertex, post_app_vertex, None, machine_time_step)
    all_to_all_connector.set_weights_and_delays(4.5, 4.0)

    direct_synapse_information_1 = SynapseInformation(
        one_to_one_connector_1, SynapseDynamicsStatic(), 0)
    direct_synapse_information_2 = SynapseInformation(
        one_to_one_connector_2, SynapseDynamicsStatic(), 1)
    all_to_all_synapse_information = SynapseInformation(
        all_to_all_connector, SynapseDynamicsStatic(), 0)

    app_edge = ProjectionApplicationEdge(
        pre_app_vertex, post_app_vertex, direct_synapse_information_1)
    app_edge.add_synapse_information(direct_synapse_information_2)
    app_edge.add_synapse_information(all_to_all_synapse_information)
    machine_edge = ProjectionMachineEdge(
        app_edge.synapse_information, pre_vertex, post_vertex)
    partition_name = "TestPartition"

    graph = MachineGraph("Test")
    graph.add_vertex(pre_vertex)
    graph.add_vertex(post_vertex)
    graph.add_edge(machine_edge, partition_name)

    graph_mapper = GraphMapper()
    graph_mapper.add_vertex_mapping(
        pre_vertex, pre_vertex_slice, pre_app_vertex)
    graph_mapper.add_vertex_mapping(
        post_vertex, post_vertex_slice, post_app_vertex)
    graph_mapper.add_edge_mapping(machine_edge, app_edge)

    weight_scales = [4096.0, 4096.0]

    key = 0
    routing_info = RoutingInfo()
    routing_info.add_partition_info(
        PartitionRoutingInfo(
            [BaseKeyAndMask(key, 0xFFFFFFF0)],
            graph.get_outgoing_edge_partition_starting_at_vertex(
                pre_vertex, partition_name)))

    # Write the data spec to a temp file
    temp_spec = tempfile.mktemp()
    spec_writer = FileDataWriter(temp_spec)
    spec = DataSpecificationGenerator(spec_writer, None)
    master_pop_sz = 1000
    master_pop_region = 0
    all_syn_block_sz = 2000
    synapse_region = 1
    spec.reserve_memory_region(master_pop_region, master_pop_sz)
    spec.reserve_memory_region(synapse_region, all_syn_block_sz)

    synapse_type = MockSynapseType()

    synaptic_manager = SynapticManager(
        synapse_type=synapse_type, ring_buffer_sigma=5.0,
        spikes_per_second=100.0, config=config)
    synaptic_manager._write_synaptic_matrix_and_master_population_table(
        spec, [post_vertex_slice], post_slice_index, post_vertex,
        post_vertex_slice, all_syn_block_sz, weight_scales,
        master_pop_region, synapse_region, routing_info, graph_mapper,
        graph, machine_time_step)
    spec.end_specification()
    spec_writer.close()

    # Execute the spec to get the generated region contents
    spec_reader = FileDataReader(temp_spec)
    executor = DataSpecificationExecutor(
        spec_reader, master_pop_sz + all_syn_block_sz)
    executor.execute()

    master_pop_table = executor.get_region(0)
    synaptic_matrix = executor.get_region(1)

    # Concatenate both regions to fake on-chip memory for read-back
    all_data = bytearray()
    all_data.extend(
        master_pop_table.region_data[
            :master_pop_table.max_write_pointer])
    all_data.extend(
        synaptic_matrix.region_data[:synaptic_matrix.max_write_pointer])
    master_pop_table_address = 0
    synaptic_matrix_address = master_pop_table.max_write_pointer
    # First word of the matrix region points at the direct synapses
    direct_synapses_address = struct.unpack_from(
        "<I", synaptic_matrix.region_data)[0]
    direct_synapses_address += synaptic_matrix_address + 8
    indirect_synapses_address = synaptic_matrix_address + 4
    placement = Placement(None, 0, 0, 1)
    transceiver = MockTransceiverRawData(all_data)

    # Get the master population table details
    items = synaptic_manager._poptable_type\
        .extract_synaptic_matrix_data_location(
            key, master_pop_table_address, transceiver,
            placement.x, placement.y)

    # The first entry should be direct, but the rest should be indirect;
    # the second is potentially direct, but has been restricted by the
    # restriction on the size of the direct matrix
    assert len(items) == 3

    # TODO: This has been changed because direct matrices are disabled!
    assert not items[0][2]
    assert not items[1][2]
    assert not items[2][2]

    data_1, row_len_1 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address,
        key=key, n_rows=pre_vertex_slice.n_atoms, index=0,
        using_extra_monitor_cores=False)
    connections_1 = synaptic_manager._synapse_io.read_synapses(
        direct_synapse_information_1, pre_vertex_slice,
        post_vertex_slice, row_len_1, 0, 2, weight_scales, data_1, None,
        app_edge.n_delay_stages, machine_time_step)

    # The first matrix is a 1-1 matrix, so row length is 1
    assert row_len_1 == 1

    # Check that all the connections have the right weight and delay
    assert len(connections_1) == post_vertex_slice.n_atoms
    assert all([conn["weight"] == 1.5 for conn in connections_1])
    assert all([conn["delay"] == 1.0 for conn in connections_1])

    data_2, row_len_2 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address,
        key=key, n_rows=pre_vertex_slice.n_atoms, index=1,
        using_extra_monitor_cores=False)
    connections_2 = synaptic_manager._synapse_io.read_synapses(
        direct_synapse_information_2, pre_vertex_slice,
        post_vertex_slice, row_len_2, 0, 2, weight_scales, data_2, None,
        app_edge.n_delay_stages, machine_time_step)

    # The second matrix is a 1-1 matrix, so row length is 1
    assert row_len_2 == 1

    # Check that all the connections have the right weight and delay
    assert len(connections_2) == post_vertex_slice.n_atoms
    assert all([conn["weight"] == 2.5 for conn in connections_2])
    assert all([conn["delay"] == 2.0 for conn in connections_2])

    data_3, row_len_3 = synaptic_manager._retrieve_synaptic_block(
        transceiver=transceiver, placement=placement,
        master_pop_table_address=master_pop_table_address,
        indirect_synapses_address=indirect_synapses_address,
        direct_synapses_address=direct_synapses_address,
        key=key, n_rows=pre_vertex_slice.n_atoms, index=2,
        using_extra_monitor_cores=False)
    connections_3 = synaptic_manager._synapse_io.read_synapses(
        all_to_all_synapse_information, pre_vertex_slice,
        post_vertex_slice, row_len_3, 0, 2, weight_scales, data_3, None,
        app_edge.n_delay_stages, machine_time_step)

    # The third matrix is an all-to-all matrix, so length is n_atoms
    assert row_len_3 == post_vertex_slice.n_atoms

    # Check that all the connections have the right weight and delay
    assert len(connections_3) == \
        post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms
    assert all([conn["weight"] == 4.5 for conn in connections_3])
    assert all([conn["delay"] == 4.0 for conn in connections_3])