Esempio n. 1
0
def assign_random_keys(nets, machine, seed, n_bits):
    """Return a dictionary mapping a net to a unique randomly-assigned key.

    Parameters
    ----------
    nets : iterable
        Nets to assign keys to (paired with IDs in iteration order).
    machine : Machine
        Machine whose dimensions bound the number of cores (17 application
        cores per chip).
    seed : int
        Seed for the random number generator, making results reproducible.
    n_bits : int
        Width of the key field in bits.

    Returns
    -------
    dict
        Maps each net to a ``(key, mask)`` pair.

    Raises
    ------
    ValueError
        If ``n_bits`` is too small to give every core a unique ID.
    """
    # Ensure sufficient bits are available for a unique ID per core.
    # NOTE: this was previously an `assert`, which is silently stripped
    # under `python -O`; an explicit check always runs.
    n_cores = machine.width * machine.height * 17
    if n_bits < np.ceil(np.log2(n_cores)):
        raise ValueError(
            "n_bits={} is too small to assign unique keys to {} "
            "cores".format(n_bits, n_cores))

    # Create the RND-formatted bit field
    rnd_bf = BitField()
    rnd_bf.add_field("index", length=n_bits)
    rnd_bf.assign_fields()  # Fix the bitfield sizing

    # Prepare to store the nets and keys
    net_keys = dict()

    # Assign a unique random ID to each core; sampling without replacement
    # guarantees uniqueness.
    random.seed(seed)
    ids = random.sample(range(1 << n_bits), n_cores)

    # Pair each net with one of the unique random indices and derive its
    # key and mask from the bit field.
    for net, index in zip(nets, ids):
        bf = rnd_bf(index=index)
        net_keys[net] = bf.get_value(), bf.get_mask()

    return net_keys
Esempio n. 2
0
    def test_eq(self):
        """SignalParameters must compare equal iff built from identical
        arguments.
        """
        keyspace = BitField()
        keyspace.add_field("x")

        arg_sets = ((False, 5, keyspace(x=2)),
                    (True, 5, keyspace(x=2)),
                    (False, 4, keyspace(x=2)),
                    (False, 5, keyspace(x=3)),
                    )

        originals = tuple(model.SignalParameters(*a) for a in arg_sets)

        # Every argument set differs, so distinct objects must all compare
        # unequal to one another.
        for first in originals:
            for second in originals:
                if first is not second:
                    assert first != second

        # Rebuilding from the same argument sets must yield objects that
        # are equal to (but not identical with) their counterparts.
        duplicates = tuple(model.SignalParameters(*a) for a in arg_sets)
        for original, duplicate in zip(originals, duplicates):
            assert original is not duplicate
            assert original == duplicate
Esempio n. 3
0
def ks():
    """Build a keyspace with routing-tagged x/y/p fields and a payload
    field ``c``.
    """
    bf = BitField()
    # Routed fields: (name, length, start bit).
    for name, length, start in (("x", 8, 24), ("y", 8, 16), ("p", 5, 11)):
        bf.add_field(name, length=length, start_at=start, tags="routing")
    bf.add_field("c", length=11, start_at=0)
    bf.assign_fields()
    return bf
def ks():
    """Return a keyspace fixture: routing fields x, y and p plus an
    11-bit payload field c.
    """
    fixture = BitField()
    fixture.add_field("x", start_at=24, length=8, tags="routing")
    fixture.add_field("y", start_at=16, length=8, tags="routing")
    fixture.add_field("p", start_at=11, length=5, tags="routing")
    fixture.add_field("c", start_at=0, length=11)
    fixture.assign_fields()
    return fixture
Esempio n. 5
0
def assign_xyzp_keys(nets):
    """Return a dictionary mapping a net to a unique key indicating the XYZP
    co-ordinate of the source of the net.
    """
    # Build the XYZP-formatted bit field.
    bit_field = BitField()
    for name, start in (("x", 24), ("y", 16), ("z", 8)):
        bit_field.add_field(name, length=8, start_at=start)
    bit_field.add_field("p", length=5, start_at=3)
    bit_field.assign_fields()  # Finalise the bitfield sizing

    # Derive a key/mask pair for each net from its source placement.
    net_keys = dict()
    for net in nets:
        x, y, p = net.source

        # Convert to the minimal hexagonal (x, y, z) representation.
        x, y, z = minimise_xyz(to_xyz((x, y)))

        keyed = bit_field(x=x, y=y, z=abs(z), p=p)
        net_keys[net] = keyed.get_value(), keyed.get_mask()

    return net_keys
Esempio n. 6
0
def assign_hilbert_keys(nets, machine):
    """Return a dictionary mapping a net to a unique key indicating the
    position of the originating chip along a Hilbert curve mapped to the
    SpiNNaker machine.
    """
    # Build the Hilbert-formatted bit field.
    bit_field = BitField()
    bit_field.add_field("index", length=16, start_at=16)
    bit_field.add_field("p", length=5, start_at=3)
    bit_field.assign_fields()  # Finalise the bitfield sizing

    # Map each chip present in the machine to its position along an
    # appropriately scaled Hilbert curve.
    chips_on_curve = (c for c in hilbert_chip_order(machine) if c in machine)
    curve = {chip: position for position, chip in enumerate(chips_on_curve)}

    # Derive a key/mask pair for each net from its source placement.
    net_keys = dict()
    for net in nets:
        x, y, p = net.source
        keyed = bit_field(index=curve[(x, y)], p=p)
        net_keys[net] = keyed.get_value(), keyed.get_mask()

    return net_keys
Esempio n. 7
0
    def __init__(self,
                 routing_tag="routing",
                 filter_routing_tag="filter_routing"):
        """Create a new keyspace container with the given tags for routing and
        filter routing.
        """
        # Store the tags used to select routing / filter-routing fields.
        self._routing_tag = routing_tag
        self._filter_routing_tag = filter_routing_tag

        # Build the 32-bit master keyspace; its "user" field carries both
        # tags so it appears in every derived key and mask.
        master = BitField(length=32)
        self._master_keyspace = master
        master.add_field(
            "user", tags=[self.routing_tag, self.filter_routing_tag])

        # Initialise the defaultdict behaviour
        super(KeyspaceContainer, self).__init__(
            self._KeyspaceGetter(master))

        # Register the default "nengo" keyspace and its fields.
        default_ks = self["nengo"]
        default_ks.add_field(
            "connection_id", tags=[self.routing_tag, self.filter_routing_tag])
        default_ks.add_field("cluster", tags=[self.routing_tag])
        default_ks.add_field("index", start_at=0)
Esempio n. 8
0
 def test_sizeof_partitioned(self):
     """A partitioned region is 4 bytes per field per atom in the slice."""
     region = KeyspacesRegion([(Signal(BitField(32)), {})] * 4,
                              fields=[mock.Mock()],
                              partitioned_by_atom=True,
                              prepend_num_keyspaces=False)
     # One atom -> 4 bytes; two atoms -> 8 bytes.
     assert region.sizeof(slice(1, 2)) == 4
     assert region.sizeof(slice(2, 4)) == 8
Esempio n. 9
0
def assign_hilbert_keys(nets, machine):
    """Return a dictionary mapping a net to a unique key indicating the
    position of the originating chip along a Hilbert curve mapped to the
    SpiNNaker machine.
    """
    # Hilbert-formatted bit field: 16-bit curve index plus 5-bit processor.
    hilbert_bf = BitField()
    hilbert_bf.add_field("index", length=16, start_at=16)
    hilbert_bf.add_field("p", length=5, start_at=3)
    hilbert_bf.assign_fields()  # Fix the bitfield sizing

    # Number the machine's chips by their order of appearance along a
    # Hilbert curve scaled to the machine.
    curve = dict()
    for i, chip in enumerate(c for c in hilbert_chip_order(machine)
                             if c in machine):
        curve[chip] = i

    # Generate a key/mask for every net from its source placement.
    net_keys = dict()
    for net in nets:
        x, y, p = net.source
        bf = hilbert_bf(index=curve[(x, y)], p=p)
        net_keys[net] = bf.get_value(), bf.get_mask()

    return net_keys
Esempio n. 10
0
def assign_xyzp_keys(nets):
    """Return a dictionary mapping a net to a unique key indicating the XYZP
    co-ordinate of the source of the net.
    """
    # XYZP-formatted bit field: 8-bit x/y/z fields plus a 5-bit processor.
    key_format = BitField()
    key_format.add_field("x", length=8, start_at=24)
    key_format.add_field("y", length=8, start_at=16)
    key_format.add_field("z", length=8, start_at=8)
    key_format.add_field("p", length=5, start_at=3)
    key_format.assign_fields()  # Fix the bitfield sizing

    net_keys = dict()
    for net in nets:
        # Source placement of the net, then its minimal hexagonal co-ords.
        src_x, src_y, src_p = net.source
        min_x, min_y, min_z = minimise_xyz(to_xyz((src_x, src_y)))

        # Build the key/mask pair for this net.
        keyed = key_format(x=min_x, y=min_y, z=abs(min_z), p=src_p)
        net_keys[net] = keyed.get_value(), keyed.get_mask()

    return net_keys
Esempio n. 11
0
def test_get_derived_keyspaces():
    """Test creation of derived keyspaces."""
    parent = BitField()
    parent.add_field("index")
    parent.add_field("spam")

    # General usage: slices and plain integers both contribute indices.
    derived = keyspaces.get_derived_keyspaces(parent, (slice(5), 5, 6, 7))
    for expected, keyspace in enumerate(derived):
        assert keyspace.index == expected

    # A named field can be populated instead of the default one.
    derived = keyspaces.get_derived_keyspaces(parent, slice(1, 3),
                                              field_identifier="spam")
    for keyspace, expected in zip(derived, (1, 2)):
        assert keyspace.spam == expected

    # An open-ended slice has no maximum and must be rejected.
    with pytest.raises(ValueError):
        list(keyspaces.get_derived_keyspaces(parent, (slice(None))))
Esempio n. 12
0
def assign_random_keys(nets, machine, seed, n_bits):
    """Return a dictionary mapping a net to a unique randomly-assigned key.

    Parameters
    ----------
    nets : iterable
        Nets to assign keys to (paired with IDs in iteration order).
    machine : Machine
        Machine whose dimensions bound the number of cores (17 application
        cores per chip).
    seed : int
        Seed for the random number generator, making results reproducible.
    n_bits : int
        Width of the key field in bits.

    Returns
    -------
    dict
        Maps each net to a ``(key, mask)`` pair.

    Raises
    ------
    ValueError
        If ``n_bits`` is too small to give every core a unique ID.
    """
    # Ensure sufficient bits are available for a unique ID per core.
    # NOTE: this was previously an `assert`, which is silently stripped
    # under `python -O`; an explicit check always runs.
    n_cores = machine.width * machine.height * 17
    if n_bits < np.ceil(np.log2(n_cores)):
        raise ValueError(
            "n_bits={} is too small to assign unique keys to {} "
            "cores".format(n_bits, n_cores))

    # Create the RND-formatted bit field
    rnd_bf = BitField()
    rnd_bf.add_field("index", length=n_bits)
    rnd_bf.assign_fields()  # Fix the bitfield sizing

    # Prepare to store the nets and keys
    net_keys = dict()

    # Assign a unique random ID to each core; sampling without replacement
    # guarantees uniqueness.
    random.seed(seed)
    ids = random.sample(range(1 << n_bits), n_cores)

    # Pair each net with one of the unique random indices and derive its
    # key and mask from the bit field.
    for net, index in zip(nets, ids):
        bf = rnd_bf(index=index)
        net_keys[net] = bf.get_value(), bf.get_mask()

    return net_keys
Esempio n. 13
0
    def test_sizeof_no_prepends(self, key_bits, n_keys, n_fields, partitioned,
                                vertex_slice):
        """Region size is 4 bytes per field per atom, where the atom count
        depends on whether the region is partitioned.
        """
        # Build the keyspace entries and mocked fields.
        keys = [(Signal(BitField(key_bits)), {}) for _ in range(n_keys)]
        fields = [mock.Mock() for _ in range(n_fields)]

        region = KeyspacesRegion(keys, fields, partitioned)

        # A partitioned region counts only the atoms inside the slice;
        # an unpartitioned one counts every key.
        if partitioned:
            n_atoms = vertex_slice.stop - vertex_slice.start
        else:
            n_atoms = n_keys
        assert region.sizeof(vertex_slice) == n_atoms * n_fields * 4
Esempio n. 14
0
    def __init__(self, neuron_threshold, neuron_decay, test_data,
                 timer_period_us=20000, sim_ticks=200, num_profile_samples=None):
        """Cache the network parameters and build the 32-bit keyspace."""
        # Network parameters shared by all layers.
        self._neuron_threshold = neuron_threshold
        self._neuron_decay = neuron_decay
        self._test_data = test_data
        self._timer_period_us = timer_period_us
        self._sim_ticks = sim_ticks
        self._num_profile_samples = num_profile_samples

        # Index of the next free vertex.
        self._vert_index = 0

        # Layer and vertex bookkeeping structures.
        self._layers = []
        self._vertex_applications = {}
        self._vertex_resources = {}

        # 32-bit keyspace: routed vertex index plus z/y/x sub-fields.
        keyspace = BitField(32)
        keyspace.add_field("vert_index", tags="routing")
        keyspace.add_field("z", start_at=16)
        keyspace.add_field("y", length=8, start_at=8)
        keyspace.add_field("x", length=8, start_at=0)
        self._keyspace = keyspace
Esempio n. 15
0
    def test_write_subregion_calls_fields(self):
        """Check that writing a subregion to file calls the field functions
        with each key and that any extra arguments are passed along.
        """
        # Build some keyspaces and a pair of fields that emit zeroes.
        keys = [(Signal(BitField(32)), {}) for _ in range(10)]
        fields = [mock.Mock() for _ in range(2)]
        for field in fields:
            field.return_value = 0

        # An UNPARTITIONED region must invoke every field with EVERY key
        # (plus the keyword arguments), whatever slice is written.
        region = KeyspacesRegion(keys, fields)
        fp = tempfile.TemporaryFile()

        kwargs = {"spam": "and eggs", "green_eggs": "and ham"}
        region.write_subregion_to_file(fp, slice(0, 1), **kwargs)

        for field in fields:
            field.assert_has_calls(
                [mock.call(k.keyspace, **kwargs) for k, _ in keys])
            field.reset_mock()

        # A PARTITIONED region must invoke every field only with the keys
        # falling INSIDE the written slice (plus the keyword arguments).
        region = KeyspacesRegion(keys, fields, partitioned_by_atom=True)

        for sl in (slice(0, 1), slice(2, 5)):
            fp = tempfile.TemporaryFile()

            kwargs = {"spam": "spam spam spam", "in_a_box": "with a fox"}
            region.write_subregion_to_file(fp, sl, **kwargs)

            for field in fields:
                field.assert_has_calls(
                    [mock.call(k.keyspace, **kwargs) for k, _ in keys[sl]])
                field.reset_mock()
Esempio n. 16
0
def test_get_derived_keyspaces():
    """Test creation of derived keyspaces."""
    base = BitField()
    base.add_field("index")
    base.add_field("spam")

    # General usage: slices and integers both contribute index values.
    result = keyspaces.get_derived_keyspaces(base, (slice(5), 5, 6, 7))
    for n, derived in enumerate(result):
        assert derived.index == n

    # A specific field may be targeted by name.
    result = keyspaces.get_derived_keyspaces(base, slice(1, 3),
                                             field_identifier="spam")
    for derived, n in zip(result, (1, 2)):
        assert derived.spam == n

    # Unbounded slices are rejected because no maximum can be inferred.
    with pytest.raises(ValueError):
        list(keyspaces.get_derived_keyspaces(base, (slice(None))))
Esempio n. 17
0
def make_routing_tables():
    """Build a 12x12 SpiNNaker machine model, generate randomly-connected
    nets, assign keys under four schemes (XYP, XYZP, Hilbert-curve order
    and random 12-bit values), route the network and dump each resulting
    table set to an ``uncompressed/gaussian_*.bin`` file.
    """
    # Create a perfect SpiNNaker machine to build against
    machine = Machine(12, 12)

    # Assign a vertex to each of the 17 application cores on each chip
    vertices = OrderedDict(
        ((x, y, p), object()) for x, y in machine for p in range(1, 18)
    )

    # Generate the vertex resources, placements and allocations (required for
    # routing)
    vertices_resources = OrderedDict(
        (vertex, {Cores: 1}) for vertex in itervalues(vertices)
    )
    placements = OrderedDict(
        (vertex, (x, y)) for (x, y, p), vertex in iteritems(vertices)
    )
    allocations = OrderedDict(
        (vertex, {Cores: slice(p, p+1)}) for (x, y, p), vertex in
        iteritems(vertices)
    )

    # Compute the distance dependent probabilities: connection probability
    # decays exponentially with hop distance.
    probs = {d: .5*math.exp(-.65*d) for d in
             range(max(machine.width, machine.height))}

    # Make the nets, each vertex is connected with distance dependent
    # probability to other vertices.
    random.seed(123)
    nets = OrderedDict()
    for source_coord, source in iteritems(vertices):
        # Convert source_coord to xyz form (dropping the processor index)
        source_coord_xyz = minimise_xyz(to_xyz(source_coord[:-1]))

        # Construct the sinks list
        sinks = list()
        for sink_coord, sink in iteritems(vertices):
            # Convert sink_coord to xyz form
            sink_coord = minimise_xyz(to_xyz(sink_coord[:-1]))

            # Get the path length
            dist = shortest_torus_path_length(source_coord_xyz, sink_coord,
                                              machine.width, machine.height)

            if random.random() < probs[dist]:
                sinks.append(sink)

        # Add the net
        nets[source_coord] = Net(source, sinks)

    rig_nets = list(itervalues(nets))  # Just the nets

    # Determine how many bits to use in the keys
    xyp_fields = BitField(32)
    xyp_fields.add_field("x", length=8, start_at=24)
    xyp_fields.add_field("y", length=8, start_at=16)
    xyp_fields.add_field("p", length=5, start_at=11)

    xyzp_fields = BitField(32)
    xyzp_fields.add_field("x", length=8, start_at=24)
    xyzp_fields.add_field("y", length=8, start_at=16)
    xyzp_fields.add_field("z", length=8, start_at=8)
    xyzp_fields.add_field("p", length=5, start_at=3)

    hilbert_fields = BitField(32)
    hilbert_fields.add_field("index", length=16, start_at=16)
    hilbert_fields.add_field("p", length=5, start_at=11)

    # 12-bit random keys; a fresh seed keeps key generation reproducible
    # and independent of the net-generation randomness above.
    random.seed(321)
    rnd_fields = BitField(32)
    rnd_fields.add_field("rnd", length=12, start_at=20)
    rnd_seen = set()

    # Generate the routing keys; chips are visited in Hilbert-curve order
    # so that `i` doubles as the Hilbert index of chip (x, y).
    net_keys_xyp = OrderedDict()
    net_keys_xyzp = OrderedDict()
    net_keys_hilbert = OrderedDict()
    net_keys_rnd = OrderedDict()
    for i, (x, y) in enumerate(chip for chip in hilbert_chip_order(machine) if
                               chip in machine):
        # Add the key for each net from each processor
        for p in range(1, 18):
            # Get the net
            net = nets[(x, y, p)]

            # Construct the xyp key/mask
            net_keys_xyp[net] = xyp_fields(x=x, y=y, p=p)

            # Construct the xyzp mask
            x_, y_, z_ = minimise_xyz(to_xyz((x, y)))
            net_keys_xyzp[net] = xyzp_fields(x=x_, y=y_, z=abs(z_), p=p)

            # Construct the Hilbert key/mask
            net_keys_hilbert[net] = hilbert_fields(index=i, p=p)

            # Construct the random 12 bit value field (rejection-sample
            # until an unused value turns up)
            val = None
            while val is None or val in rnd_seen:
                val = random.getrandbits(12)
            rnd_seen.add(val)
            net_keys_rnd[net] = rnd_fields(rnd=val)

    # Route the network and then generate the routing tables
    constraints = list()
    print("Routing...")
    routing_tree = route(vertices_resources, rig_nets, machine, constraints,
                         placements, allocations)

    # Write the routing tables to file, once per key-assignment scheme
    for fields, desc in ((net_keys_xyp, "xyp"),
                         (net_keys_xyzp, "xyzp"),
                         (net_keys_hilbert, "hilbert"),
                         (net_keys_rnd, "rnd")):
        print("Getting keys and masks...")
        keys = {net: (bf.get_value(), bf.get_mask()) for net, bf in
                iteritems(fields)}

        print("Constructing routing tables for {}...".format(desc))
        tables = routing_tree_to_tables(routing_tree, keys)
        print([len(x) for x in itervalues(tables)])

        print("Writing to file...")
        fn = "uncompressed/gaussian_{}_{}_{}.bin".format(
            machine.width, machine.height, desc)
        with open(fn, "wb+") as f:
            dump_routing_tables(f, tables)
Esempio n. 18
0
class ConvNet(object):
    """A feed-forward convolutional spiking network that can be built,
    placed, routed and simulated on a SpiNNaker machine.
    """

    def __init__(self, neuron_threshold, neuron_decay, test_data,
                 timer_period_us=20000, sim_ticks=200, num_profile_samples=None):
        """Create an empty network.

        Parameters are only cached here; hardware interaction happens in
        :meth:`run`.
        """
        # Cache network parameters
        self._neuron_threshold = neuron_threshold
        self._neuron_decay = neuron_decay
        self._test_data = test_data
        self._timer_period_us = timer_period_us
        self._sim_ticks = sim_ticks
        self._num_profile_samples = num_profile_samples

        # Index of the first vertex of the next layer to be added
        self._vert_index = 0

        # Create data structures
        self._layers = []
        self._vertex_applications = {}
        self._vertex_resources = {}

        # Create a 32-bit keyspace
        self._keyspace = BitField(32)
        self._keyspace.add_field("vert_index", tags="routing")
        self._keyspace.add_field("z", start_at=16)
        self._keyspace.add_field("y", length=8, start_at=8)
        self._keyspace.add_field("x", length=8, start_at=0)

    # ------------------------------------------------------------------------
    # Public methods
    # ------------------------------------------------------------------------
    def add_layer(self, output_width, output_height, padding, stride, weights,
                  record_spikes):
        """Append a convolutional neuron layer to the network.

        The first layer added receives the cached test data as input.
        """
        # Get index of new layer
        layer_index = len(self._layers)

        # Add layer to conv net
        self._layers.append(
            ConvNeuronLayer(start_vert_index=self._vert_index,
                            output_width=output_width,
                            output_height=output_height,
                            padding=padding, stride=stride,
                            neuron_decay=self._neuron_decay,
                            neuron_threshold=self._neuron_threshold,
                            record_spikes=record_spikes,
                            weights=weights, parent_keyspace=self._keyspace,
                            input_data=(self._test_data if layer_index == 0
                                        else None),
                            vertex_applications=self._vertex_applications,
                            vertex_resources=self._vertex_resources,
                            timer_period_us=self._timer_period_us,
                            sim_ticks=self._sim_ticks,
                            num_profile_samples=self._num_profile_samples))

        # **YUCK** update vertex index
        self._vert_index += len(self._layers[-1].vertices)

    def run(self, spinnaker_hostname, disable_software_watchdog=False):
        """Place, route, load and simulate the network on hardware.

        Recorded spikes are saved to ``layer_<i>.npy`` per layer; the
        SpiNNaker application is always stopped on exit.
        """
        logger.info("Assigning keyspaces")

        # Finalise keyspace fields
        self._keyspace.assign_fields()

        # Extract position and length of z-field in keyspace
        z_loc, z_length = self._keyspace.get_location_and_length("z")
        z_mask = (1 << z_length) - 1
        logger.debug("Z location:%u, length:%u, mask:%08x",
                     z_loc, z_length, z_mask)

        # Loop through layers and their successors
        logger.info("Building nets")
        nets = []
        net_keys = {}
        for layer, next_layer in zip(self._layers[:-1], self._layers[1:]):
            # Loop through all vertices in layer
            for vertex in layer.vertices:
                # Create a key for the vertex feeding forward
                net_key = (vertex.routing_key, vertex.routing_mask)

                # Create a net connecting vertex
                # to all vertices in next layer
                net = Net(vertex, next_layer.vertices)

                # Add net to list and associate with key
                nets.append(net)
                net_keys[net] = net_key

        machine_controller = None
        try:
            # Get machine controller from connected SpiNNaker board and boot
            machine_controller = MachineController(spinnaker_hostname)
            machine_controller.boot()

            # Get system info
            system_info = machine_controller.get_system_info()
            logger.debug("Found %u chip machine", len(system_info))

            # Place-and-route
            logger.info("Placing and routing")
            placements, allocations, run_app_map, routing_tables =\
                place_and_route_wrapper(self._vertex_resources,
                                        self._vertex_applications,
                                        nets, net_keys, system_info)

            # Convert placement values to a set to get unique list of chips
            unique_chips = set(itervalues(placements))
            logger.info("Placed on %u cores (%u chips)",
                        len(placements), len(unique_chips))
            logger.debug(list(itervalues(placements)))

            # If software watchdog is disabled, write zero to each chip in
            # placement's SV struct, otherwise, write default from SV struct
            # file
            wdog = (0 if disable_software_watchdog else
                    machine_controller.structs["sv"]["soft_wdog"].default)
            for x, y in unique_chips:
                logger.debug("Setting software watchdog to %u for chip %u, %u",
                             wdog, x, y)
                machine_controller.write_struct_field("sv", "soft_wdog",
                                                      wdog, x, y)

            logger.info("Loading layers")
            for i, l in enumerate(self._layers):
                logger.info("\tLayer %u", i)
                l.load(placements, allocations, machine_controller, z_mask)

            # Load routing tables and applications
            logger.info("Loading routing tables")
            machine_controller.load_routing_tables(routing_tables)

            logger.info("Loading applications")
            machine_controller.load_application(run_app_map)

            # Wait for all cores to hit SYNC0
            logger.info("Waiting for synch")
            num_verts = len(self._vertex_resources)
            self._wait_for_transition(placements, allocations,
                                      machine_controller,
                                      AppState.init, AppState.sync0,
                                      num_verts)

            # Sync!
            machine_controller.send_signal("sync0")

            # Wait for simulation to complete
            logger.info("Simulating")
            time.sleep(
                float(self._timer_period_us * self._sim_ticks) / 1000000.0)

            # Wait for all cores to exit
            logger.info("Waiting for exit")
            self._wait_for_transition(placements, allocations,
                                      machine_controller,
                                      AppState.run, AppState.exit,
                                      num_verts)

            logger.info("Reading stats")
            for i, l in enumerate(self._layers):
                stats = l.read_statistics()
                logger.info("\tLayer %u", i)
                logger.info("\t\tInput buffer overflows:%u",
                            np.sum(stats["input_buffer_overflows"]))
                logger.info("\t\tTask queue overflows:%u",
                            np.sum(stats["task_queue_full"]))
                logger.info("\t\tTimer event overruns:%u",
                            np.sum(stats["timer_event_overflows"]))
                logger.info("\t\tSpikes emitted:%u",
                            np.sum(stats["spikes_emitted"]))
                logger.info("\t\tSpikes convolved:%u",
                            np.sum(stats["spikes_convolved"]))

            if self._num_profile_samples is not None:
                logger.info("Reading profiling data")

                timestep_ms = self._timer_period_us / 1000.0
                duration_ms = timestep_ms * self._sim_ticks

                for i, l in enumerate(self._layers):
                    profiling_data = l.read_profile()[0][1]
                    logger.info("\tLayer %u", i)
                    profiling.print_summary(profiling_data, duration_ms,
                                            timestep_ms)

            # Save off recorded spike data for each layer
            for i, l in enumerate(self._layers):
                spikes = l.read_recorded_spikes()
                np.save("layer_%u.npy" % i, spikes)

        finally:
            if machine_controller is not None:
                logger.info("Stopping SpiNNaker application")
                machine_controller.send_signal("stop")

    # ------------------------------------------------------------------------
    # Private methods
    # ------------------------------------------------------------------------
    def _wait_for_transition(self, placements, allocations, machine_controller,
                             from_state, to_state,
                             num_verts, timeout=5.0):
        """Wait for all cores to leave ``from_state`` and reach ``to_state``.

        If not all cores reach ``to_state`` within ``timeout`` the IO
        buffer of each failed core is printed and an Exception is raised.
        """
        while True:
            # If no cores are still in from_state, stop
            if machine_controller.count_cores_in_state(from_state) == 0:
                break

            # Wait a bit
            time.sleep(1.0)

        # Wait for all cores to reach to_state
        cores_in_to_state =\
            machine_controller.wait_for_cores_to_reach_state(
                to_state, num_verts, timeout=timeout)
        if cores_in_to_state != num_verts:
            # Loop through all placed vertices
            for vertex, (x, y) in iteritems(placements):
                p = allocations[vertex][machine.Cores].start
                status = machine_controller.get_processor_status(p, x, y)
                if status.cpu_state is not to_state:
                    print("Core ({}, {}, {}) in state {!s}".format(
                        x, y, p, status))
                    # FIX: was a Python 2 print statement, which is a
                    # SyntaxError under Python 3.
                    print(machine_controller.get_iobuf(p, x, y))
            raise Exception("Unexpected core failures "
                            "before reaching %s state (%u/%u)." %
                            (to_state, cores_in_to_state, num_verts))
Esempio n. 19
0
 def test_sizeof_with_prepends(self):
     """With no fields, a region that prepends the keyspace count is a
     single 4-byte word.
     """
     region = KeyspacesRegion([(Signal(BitField(32)), {})],
                              fields=[],
                              prepend_num_keyspaces=True)
     assert region.sizeof(slice(None)) == 4
    def __call__(self, application_graph, graph_mapper, machine_graph,
                 n_keys_map):
        """Allocate keys and masks to every outgoing edge partition using a
        32-bit ``BitField`` keyspace.

        :param application_graph: The application graph
        :param graph_mapper: the mapping between graphs
        :param machine_graph: the machine graph
        :param n_keys_map: the mapping between edges and n keys
        :return: routing information objects and the set of bit positions
            used by fields within the 32-bit keyspace
        """
        # Each partition is visited three times overall (grouping, type
        # deduction, key extraction), hence the factor of 3.
        progress_bar = ProgressBar(
            machine_graph.n_outgoing_edge_partitions * 3,
            "Allocating routing keys")

        # ensure groups are stable and correct
        self._determine_groups(
            machine_graph, graph_mapper, application_graph, n_keys_map,
            progress_bar)

        # define the key space
        bit_field_space = BitField(32)
        field_positions = set()

        # locate however many types of constraints there are
        seen_fields = deduce_types(machine_graph)
        progress_bar.update(machine_graph.n_outgoing_edge_partitions)

        # An application-level field is only needed to disambiguate when
        # more than one constraint type was seen.
        if len(seen_fields) > 1:
            self._adds_application_field_to_the_fields(seen_fields)

        # handle the application space
        self._create_application_space_in_the_bit_field_space(
            bit_field_space, seen_fields, field_positions)

        # assign fields to positions in the space
        bit_field_space.assign_fields()

        # get positions of the flexible fields:
        self._assign_flexi_field_positions(
            bit_field_space, seen_fields, field_positions)

        # create routing_info_allocator
        routing_info = RoutingInfo()
        seen_mask_instances = 0

        # extract keys and masks for each edge from the bitfield
        for partition in machine_graph.outgoing_edge_partitions:
            # get keys and masks
            keys_and_masks, seen_mask_instances = \
                self._extract_keys_and_masks_from_bit_field(
                    partition, bit_field_space, n_keys_map,
                    seen_mask_instances)

            # update routing info for each edge in the partition
            partition_info = PartitionRoutingInfo(keys_and_masks, partition)
            routing_info.add_partition_info(partition_info)

            # update the progress bar again
            progress_bar.update()
        progress_bar.end()

        return routing_info, field_positions
Esempio n. 21
0
def make_routing_tables():
    """Generate and dump routing tables for a centroid-style random network.

    Builds a 12x12 torus machine with 17 application cores per chip, wires
    each core to other cores with a distance-dependent probability plus a
    handful of randomly-placed "centroid" hot-spots, routes the resulting
    nets once, and writes four uncompressed routing-table binaries -- one
    per key-allocation scheme (xyp, xyzp, hilbert, rnd).

    NOTE(review): the output is deterministic only because of the fixed RNG
    seeds (123 for connectivity, 321 for keys) and the insertion-ordered
    dicts; the exact order of ``random`` calls must not be changed.
    """
    # Create a perfect SpiNNaker machine to build against
    machine = Machine(12, 12)

    # Assign a vertex to each of the 17 application cores on each chip
    # (cores 1..17; core 0 is presumably reserved -- not used here).
    vertices = OrderedDict(
        ((x, y, p), object()) for x, y in machine for p in range(1, 18))

    # Generate the vertex resources, placements and allocations (required for
    # routing)
    vertices_resources = OrderedDict((vertex, {
        Cores: 1
    }) for vertex in itervalues(vertices))
    placements = OrderedDict(
        (vertex, (x, y)) for (x, y, p), vertex in iteritems(vertices))
    allocations = OrderedDict((vertex, {
        Cores: slice(p, p + 1)
    }) for (x, y, p), vertex in iteritems(vertices))

    # Compute the distance dependent probabilities - this is a geometric
    # distribution such that each core has a 50% chance of being connected to
    # each core on the same chip, 25% on chips one hop away, 12.5% on chips two
    # hops away, etc.
    p = 0.5
    probs = {
        d: p * (1 - p)**d
        for d in range(max(machine.width, machine.height))
    }

    # A second, flatter geometric distribution (30% at distance 0) used for
    # connections radiating from centroid centres.
    p = 0.3
    dprobs = {
        d: p * (1 - p)**d
        for d in range(max(machine.width, machine.height))
    }

    # Compute offsets to get to centroids: every hexagonal (i, j, k) offset
    # whose components sum to 5, 6 or 7 hops.
    vector_centroids = list()
    for d in (5, 6, 7):
        for i in range(d + 1):
            for j in range(d + 1 - i):
                vector_centroids.append((i, j, d - i - j))

    # Make the nets, each vertex is connected with distance dependent
    # probability to other vertices.
    random.seed(123)
    nets = OrderedDict()
    for source_coord, source in iteritems(vertices):
        # Convert source_coord to xyz form (drop the core number first)
        source_coord_xyz = minimise_xyz(to_xyz(source_coord[:-1]))

        # Add a number of centroids
        x, y, z = source_coord_xyz
        possible_centroids = [
            minimise_xyz((x + i, y + j, z + k)) for i, j, k in vector_centroids
        ]
        # Weighted draw over a 20-element tuple: 17/20 chance of no
        # centroids, 2/20 of one, 1/20 of two.
        n_centroids = random.choice(17 * (0, ) + (1, 1) + (2, ))
        centroids = random.sample(possible_centroids, n_centroids)

        # Construct the sinks list
        sinks = list()
        for sink_coord, sink in iteritems(vertices):
            # Convert sink_coord to xyz form
            sink_coord = minimise_xyz(to_xyz(sink_coord[:-1]))

            # Get the path length to the original source
            dist = shortest_torus_path_length(source_coord_xyz, sink_coord,
                                              machine.width, machine.height)
            if random.random() < probs[dist]:
                sinks.append(sink)
                continue

            # See if the sink is connected to the centre of any of the
            # centroids.
            for coord in centroids:
                dist = shortest_torus_path_length(coord, sink_coord,
                                                  machine.width,
                                                  machine.height)

                if random.random() < dprobs[dist]:
                    sinks.append(sink)
                    break

        # Add the net (a source may have an empty sink list)
        nets[source_coord] = Net(source, sinks)

    rig_nets = list(itervalues(nets))  # Just the nets

    # Determine how many bits to use in the keys.
    # Scheme 1: plain (x, y, core) packed into the top bits.
    xyp_fields = BitField(32)
    xyp_fields.add_field("x", length=8, start_at=24)
    xyp_fields.add_field("y", length=8, start_at=16)
    xyp_fields.add_field("p", length=5, start_at=11)

    # Scheme 2: minimised hexagonal (x, y, z) coordinate plus core.
    xyzp_fields = BitField(32)
    xyzp_fields.add_field("x", length=8, start_at=24)
    xyzp_fields.add_field("y", length=8, start_at=16)
    xyzp_fields.add_field("z", length=8, start_at=8)
    xyzp_fields.add_field("p", length=5, start_at=3)

    # Scheme 3: chip index along a Hilbert space-filling curve plus core.
    hilbert_fields = BitField(32)
    hilbert_fields.add_field("index", length=16, start_at=16)
    hilbert_fields.add_field("p", length=5, start_at=11)

    # Scheme 4: a unique random 12-bit value per net.
    random.seed(321)
    rnd_fields = BitField(32)
    rnd_fields.add_field("rnd", length=12, start_at=20)
    rnd_seen = set()

    # Generate the routing keys
    net_keys_xyp = OrderedDict()
    net_keys_xyzp = OrderedDict()
    net_keys_hilbert = OrderedDict()
    net_keys_rnd = OrderedDict()
    # i is the chip's position along the Hilbert curve (the curve may visit
    # coordinates outside the machine, hence the membership filter).
    for i, (x, y) in enumerate(chip for chip in hilbert_chip_order(machine)
                               if chip in machine):
        # Add the key for each net from each processor
        for p in range(1, 18):
            # Get the net
            net = nets[(x, y, p)]

            # Construct the xyp key/mask
            net_keys_xyp[net] = xyp_fields(x=x, y=y, p=p)

            # Construct the xyzp mask.  abs() keeps the z component
            # non-negative for the unsigned field -- presumably minimise_xyz
            # leaves at most one negative component; TODO confirm keys
            # remain unique under abs().
            x_, y_, z_ = minimise_xyz(to_xyz((x, y)))
            net_keys_xyzp[net] = xyzp_fields(x=x_, y=y_, z=abs(z_), p=p)

            # Construct the Hilbert key/mask
            net_keys_hilbert[net] = hilbert_fields(index=i, p=p)

            # Construct the random 12 bit value field: rejection-sample
            # until an unused 12-bit value is found (2448 nets out of 4096
            # possible values, so this terminates quickly).
            val = None
            while val is None or val in rnd_seen:
                val = random.getrandbits(12)
            rnd_seen.add(val)
            net_keys_rnd[net] = rnd_fields(rnd=val)

    # Route the network and then generate the routing tables
    constraints = list()
    print("Routing...")
    routing_tree = route(vertices_resources, rig_nets, machine, constraints,
                         placements, allocations)

    # Write the routing tables to file, one binary per key scheme
    for fields, desc in ((net_keys_xyp, "xyp"), (net_keys_xyzp, "xyzp"),
                         (net_keys_hilbert, "hilbert"), (net_keys_rnd, "rnd")):
        print("Getting keys and masks...")
        # Evaluate each bitfield instance into a concrete (key, mask) pair
        keys = OrderedDict((net, (bf.get_value(), bf.get_mask()))
                           for net, bf in iteritems(fields))

        print("Constructing routing tables for {}...".format(desc))
        tables = routing_tree_to_tables(routing_tree, keys)
        print([len(x) for x in itervalues(tables)])

        print("Writing to file...")
        fn = "uncompressed/centroid_{}_{}_{}.bin".format(
            machine.width, machine.height, desc)
        with open(fn, "wb+") as f:
            dump_routing_tables(f, tables)
Esempio n. 22
0
def make_routing_tables():
    """Generate and dump routing tables for a centroid-style random network.

    Builds a 12x12 torus machine with 17 application cores per chip,
    connects each core to others with a distance-dependent probability plus
    randomly-placed "centroid" hot-spots, routes the nets, and writes four
    uncompressed routing-table binaries -- one per key-allocation scheme
    (xyp, xyzp, hilbert, rnd).

    NOTE(review): deterministic only via the fixed RNG seeds (123, 321) and
    insertion-ordered dicts; do not reorder any ``random`` calls.
    """
    # Create a perfect SpiNNaker machine to build against
    machine = Machine(12, 12)

    # Assign a vertex to each of the 17 application cores on each chip
    # (cores 1..17; core 0 is presumably reserved -- not used here).
    vertices = OrderedDict(
        ((x, y, p), object()) for x, y in machine for p in range(1, 18)
    )

    # Generate the vertex resources, placements and allocations (required for
    # routing)
    vertices_resources = OrderedDict(
        (vertex, {Cores: 1}) for vertex in itervalues(vertices)
    )
    placements = OrderedDict(
        (vertex, (x, y)) for (x, y, p), vertex in iteritems(vertices)
    )
    allocations = OrderedDict(
        (vertex, {Cores: slice(p, p+1)}) for (x, y, p), vertex in
        iteritems(vertices)
    )

    # Compute the distance dependent probabilities - this is a geometric
    # distribution such that each core has a 50% chance of being connected to
    # each core on the same chip, 25% on chips one hop away, 12.5% on chips two
    # hops away, etc.
    p = 0.5
    probs = {d: p*(1 - p)**d for d in
             range(max(machine.width, machine.height))}

    # Flatter geometric distribution (30% at distance 0) for connections
    # radiating from centroid centres.
    p = 0.3
    dprobs = {d: p*(1 - p)**d for d in
              range(max(machine.width, machine.height))}

    # Compute offsets to get to centroids: every hexagonal (i, j, k) offset
    # whose components sum to 5, 6 or 7 hops.
    vector_centroids = list()
    for d in (5, 6, 7):
        for i in range(d + 1):
            for j in range(d + 1 - i):
                vector_centroids.append((i, j, d - i - j))

    # Make the nets, each vertex is connected with distance dependent
    # probability to other vertices.
    random.seed(123)
    nets = OrderedDict()
    for source_coord, source in iteritems(vertices):
        # Convert source_coord to xyz form (drop the core number first)
        source_coord_xyz = minimise_xyz(to_xyz(source_coord[:-1]))

        # Add a number of centroids
        x, y, z = source_coord_xyz
        possible_centroids = [minimise_xyz((x + i, y + j, z + k)) for
                              i, j, k in vector_centroids]
        # Weighted draw over a 20-element tuple: 17/20 chance of no
        # centroids, 2/20 of one, 1/20 of two.
        n_centroids = random.choice(17*(0, ) + (1, 1) + (2, ))
        centroids = random.sample(possible_centroids, n_centroids)

        # Construct the sinks list
        sinks = list()
        for sink_coord, sink in iteritems(vertices):
            # Convert sink_coord to xyz form
            sink_coord = minimise_xyz(to_xyz(sink_coord[:-1]))

            # Get the path length to the original source
            dist = shortest_torus_path_length(source_coord_xyz, sink_coord,
                                              machine.width, machine.height)
            if random.random() < probs[dist]:
                sinks.append(sink)
                continue

            # See if the sink is connected to the centre of any of the
            # centroids.
            for coord in centroids:
                dist = shortest_torus_path_length(
                    coord, sink_coord, machine.width, machine.height
                )

                if random.random() < dprobs[dist]:
                    sinks.append(sink)
                    break

        # Add the net (a source may have an empty sink list)
        nets[source_coord] = Net(source, sinks)

    rig_nets = list(itervalues(nets))  # Just the nets

    # Determine how many bits to use in the keys.
    # Scheme 1: plain (x, y, core) packed into the top bits.
    xyp_fields = BitField(32)
    xyp_fields.add_field("x", length=8, start_at=24)
    xyp_fields.add_field("y", length=8, start_at=16)
    xyp_fields.add_field("p", length=5, start_at=11)

    # Scheme 2: minimised hexagonal (x, y, z) coordinate plus core.
    xyzp_fields = BitField(32)
    xyzp_fields.add_field("x", length=8, start_at=24)
    xyzp_fields.add_field("y", length=8, start_at=16)
    xyzp_fields.add_field("z", length=8, start_at=8)
    xyzp_fields.add_field("p", length=5, start_at=3)

    # Scheme 3: chip index along a Hilbert space-filling curve plus core.
    hilbert_fields = BitField(32)
    hilbert_fields.add_field("index", length=16, start_at=16)
    hilbert_fields.add_field("p", length=5, start_at=11)

    # Scheme 4: a unique random 12-bit value per net.
    random.seed(321)
    rnd_fields = BitField(32)
    rnd_fields.add_field("rnd", length=12, start_at=20)
    rnd_seen = set()

    # Generate the routing keys
    net_keys_xyp = OrderedDict()
    net_keys_xyzp = OrderedDict()
    net_keys_hilbert = OrderedDict()
    net_keys_rnd = OrderedDict()
    # i is the chip's position along the Hilbert curve (the curve may visit
    # coordinates outside the machine, hence the membership filter).
    for i, (x, y) in enumerate(chip for chip in hilbert_chip_order(machine) if
                               chip in machine):
        # Add the key for each net from each processor
        for p in range(1, 18):
            # Get the net
            net = nets[(x, y, p)]

            # Construct the xyp key/mask
            net_keys_xyp[net] = xyp_fields(x=x, y=y, p=p)

            # Construct the xyzp mask.  abs() keeps the z component
            # non-negative for the unsigned field -- presumably minimise_xyz
            # leaves at most one negative component; TODO confirm keys
            # remain unique under abs().
            x_, y_, z_ = minimise_xyz(to_xyz((x, y)))
            net_keys_xyzp[net] = xyzp_fields(x=x_, y=y_, z=abs(z_), p=p)

            # Construct the Hilbert key/mask
            net_keys_hilbert[net] = hilbert_fields(index=i, p=p)

            # Construct the random 12 bit value field: rejection-sample
            # until an unused 12-bit value is found (2448 nets out of 4096
            # possible values, so this terminates quickly).
            val = None
            while val is None or val in rnd_seen:
                val = random.getrandbits(12)
            rnd_seen.add(val)
            net_keys_rnd[net] = rnd_fields(rnd=val)

    # Route the network and then generate the routing tables
    constraints = list()
    print("Routing...")
    routing_tree = route(vertices_resources, rig_nets, machine, constraints,
                         placements, allocations)

    # Write the routing tables to file, one binary per key scheme
    for fields, desc in ((net_keys_xyp, "xyp"),
                         (net_keys_xyzp, "xyzp"),
                         (net_keys_hilbert, "hilbert"),
                         (net_keys_rnd, "rnd")):
        print("Getting keys and masks...")
        # Evaluate each bitfield instance into a concrete (key, mask) pair
        keys = OrderedDict(
            (net, (bf.get_value(), bf.get_mask())) for net, bf in
            iteritems(fields)
        )

        print("Constructing routing tables for {}...".format(desc))
        tables = routing_tree_to_tables(routing_tree, keys)
        print([len(x) for x in itervalues(tables)])

        print("Writing to file...")
        fn = "uncompressed/centroid_{}_{}_{}.bin".format(
            machine.width, machine.height, desc)
        with open(fn, "wb+") as f:
            dump_routing_tables(f, tables)