Example 1
def assign_xyzp_keys(nets):
    """Return a dictionary mapping a net to a unique key indicating the XYZP
    co-ordinate of the source of the net.
    """
    # Create the XYZP-formatted bit field
    xyzp_bf = BitField()
    xyzp_bf.add_field("x", length=8, start_at=24)
    xyzp_bf.add_field("y", length=8, start_at=16)
    xyzp_bf.add_field("z", length=8, start_at=8)
    xyzp_bf.add_field("p", length=5, start_at=3)
    xyzp_bf.assign_fields()  # Fix the bitfield sizing

    # Prepare to store the nets and keys
    net_keys = dict()

    # For each net look at the placement of the source vertex and hence
    # generate a key.
    for net in nets:
        # Get the originating co-ordinates
        x, y, p = net.source

        # Get the minimal xyz co-ordinate
        x, y, z = minimise_xyz(to_xyz((x, y)))

        # Generate the key and mask
        bf = xyzp_bf(x=x, y=y, z=abs(z), p=p)
        net_keys[net] = bf.get_value(), bf.get_mask()

    return net_keys
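
The (key, mask) pairs come straight from the bit-field API used above: calling the bit field with keyword arguments fixes those field values, get_value() packs them into a key and get_mask() covers the bits of all defined fields. A minimal sketch of that round trip, assuming the BitField in question is rig's rig.bitfield.BitField and using made-up co-ordinates:

# Minimal sketch of the key/mask round trip, assuming rig.bitfield.BitField
# and made-up co-ordinates.
from rig.bitfield import BitField

xyzp_bf = BitField()
xyzp_bf.add_field("x", length=8, start_at=24)
xyzp_bf.add_field("y", length=8, start_at=16)
xyzp_bf.add_field("z", length=8, start_at=8)
xyzp_bf.add_field("p", length=5, start_at=3)
xyzp_bf.assign_fields()

# Hypothetical source: chip (1, 0) with a minimal z of -2, core 3
bf = xyzp_bf(x=1, y=0, z=2, p=3)
print(hex(bf.get_value()))  # x, y, z and p packed into their bit positions
print(hex(bf.get_mask()))   # mask covering all four defined fields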
Example 2
def assign_hilbert_keys(nets, machine):
    """Return a dictionary mapping a net to a unique key indicating the
    position of the originating chip along a Hilbert curve mapped to the
    SpiNNaker machine.
    """
    # Create the Hilbert-formatted bit field
    hilbert_bf = BitField()
    hilbert_bf.add_field("index", length=16, start_at=16)
    hilbert_bf.add_field("p", length=5, start_at=3)
    hilbert_bf.assign_fields()  # Fix the bitfield sizing

    # Prepare to store the nets and keys
    net_keys = dict()

    # Generate an appropriately scaled Hilbert curve
    curve = {(x, y): i
             for i, (x, y) in enumerate(chip
                                        for chip in hilbert_chip_order(machine)
                                        if chip in machine)}

    # For each net look at the placement of the source vertex and hence
    # generate a key.
    for net in nets:
        # Get the originating co-ordinates
        x, y, p = net.source

        # Generate the key and mask
        bf = hilbert_bf(index=curve[(x, y)], p=p)
        net_keys[net] = bf.get_value(), bf.get_mask()

    return net_keys
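
The curve dictionary simply numbers chips in the order the curve generator visits them, skipping co-ordinates that are not actually present in the machine. A self-contained sketch of the same pattern, with a hypothetical chip_order() generator standing in for hilbert_chip_order(machine):

def chip_order():
    # Hypothetical stand-in for hilbert_chip_order(machine): yields (x, y)
    # co-ordinates in curve order, possibly including chips the machine does
    # not actually have.
    yield from [(0, 0), (0, 1), (1, 1), (1, 0), (2, 0)]

machine = {(0, 0), (0, 1), (1, 1), (1, 0)}  # chips actually present

# Number the chips in visit order, skipping the absent (2, 0)
curve = {(x, y): i
         for i, (x, y) in enumerate(c for c in chip_order() if c in machine)}

assert curve == {(0, 0): 0, (0, 1): 1, (1, 1): 2, (1, 0): 3}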
Example 3
def assign_random_keys(nets, machine, seed, n_bits):
    """Return a dictionary mapping a net to a unique randomly-assigned key.
    """
    # Ensure sufficient bits available
    assert n_bits >= np.ceil(np.log2(machine.width * machine.height * 17))

    # Create the RND-formatted bit field
    rnd_bf = BitField()
    rnd_bf.add_field("index", length=n_bits)
    rnd_bf.assign_fields()  # Fix the bitfield sizing

    # Prepare to store the nets and keys
    net_keys = dict()

    # Assign a unique random ID to each core
    random.seed(seed)
    ids = random.sample(range(1 << n_bits),
                        machine.width * machine.height * 17)

    # Pair each net with one of the pre-generated unique random IDs and
    # generate its key.
    for net, index in zip(nets, ids):
        # Generate the key and mask
        bf = rnd_bf(index=index)
        net_keys[net] = bf.get_value(), bf.get_mask()

    return net_keys
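
The assertion just checks that n_bits can hold one distinct ID per possible source core (width x height x 17 application cores per chip), and random.sample draws without replacement, so no two cores ever share an ID. A quick check of that arithmetic for a hypothetical 12 x 12 machine:

import math
import random

# Hypothetical machine: 12 x 12 chips, 17 application cores per chip
width, height, cores_per_chip = 12, 12, 17
n_cores = width * height * cores_per_chip   # 2448 possible sources
n_bits = math.ceil(math.log2(n_cores))      # 12 bits are enough here

random.seed(2705)                           # arbitrary seed
ids = random.sample(range(1 << n_bits), n_cores)
assert len(ids) == len(set(ids))            # sample() never repeats a value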
Example 4
def ks():
    """Return a 32-bit keyspace with routing-tagged "x", "y" and "p" fields
    and an un-tagged 11-bit "c" field.
    """
    keyspace = BitField()
    keyspace.add_field("x", length=8, start_at=24, tags="routing")
    keyspace.add_field("y", length=8, start_at=16, tags="routing")
    keyspace.add_field("p", length=5, start_at=11, tags="routing")
    keyspace.add_field("c", length=11, start_at=0)
    keyspace.assign_fields()
    return keyspace
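
The "routing" tag lets callers derive router keys and masks from just the x, y and p fields, while the un-tagged 11-bit c field is left for per-connection use and does not affect routing. A minimal sketch of that, assuming the BitField is rig's and that its get_value() and get_mask() accept a tag argument:

# Minimal sketch; tag-based get_value()/get_mask() is an assumption about
# this BitField, not something shown above.
keyspace = ks()

# Fix the routing fields for a hypothetical source at chip (1, 2), core 3
key = keyspace(x=1, y=2, p=3)

routing_key = key.get_value(tag="routing")       # only x, y and p contribute
routing_mask = keyspace.get_mask(tag="routing")  # ignores the 11-bit c field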
Example 5
class ConvNet(object):
    def __init__(self, neuron_threshold, neuron_decay, test_data,
                 timer_period_us=20000, sim_ticks=200, num_profile_samples=None):
        # Cache network parameters
        self._neuron_threshold = neuron_threshold
        self._neuron_decay = neuron_decay
        self._test_data = test_data
        self._timer_period_us = timer_period_us
        self._sim_ticks = sim_ticks
        self._num_profile_samples = num_profile_samples

        self._vert_index = 0

        # Create data structures
        self._layers = []
        self._vertex_applications = {}
        self._vertex_resources = {}

        # Create a 32-bit keyspace
        self._keyspace = BitField(32)
        self._keyspace.add_field("vert_index", tags="routing")
        self._keyspace.add_field("z", start_at=16)
        self._keyspace.add_field("y", length=8, start_at=8)
        self._keyspace.add_field("x", length=8, start_at=0)

    # ------------------------------------------------------------------------
    # Public methods
    # ------------------------------------------------------------------------
    def add_layer(self, output_width, output_height, padding, stride, weights,
                  record_spikes):
        # Get index of new layer
        layer_index = len(self._layers)

        # Add layer to conv net
        self._layers.append(
            ConvNeuronLayer(start_vert_index=self._vert_index,
                            output_width=output_width,
                            output_height=output_height,
                            padding=padding, stride=stride,
                            neuron_decay=self._neuron_decay,
                            neuron_threshold=self._neuron_threshold,
                            record_spikes=record_spikes,
                            weights=weights, parent_keyspace=self._keyspace,
                            input_data=(self._test_data if layer_index == 0
                                        else None),
                            vertex_applications=self._vertex_applications,
                            vertex_resources=self._vertex_resources,
                            timer_period_us=self._timer_period_us,
                            sim_ticks=self._sim_ticks,
                            num_profile_samples=self._num_profile_samples))

        # **YUCK** update vertex index
        self._vert_index += len(self._layers[-1].vertices)

    def run(self, spinnaker_hostname, disable_software_watchdog=False):
        logger.info("Assigning keyspaces")

        # Finalise keyspace fields
        self._keyspace.assign_fields()

        # Extract position and length of z-field in keyspace
        z_loc, z_length = self._keyspace.get_location_and_length("z")
        z_mask = (1 << z_length) - 1
        logger.debug("Z location:%u, length:%u, mask:%08x",
                        z_loc, z_length, z_mask)

        # Loop through layers and their successors
        logger.info("Building nets")
        nets = []
        net_keys = {}
        for layer, next_layer in zip(self._layers[:-1], self._layers[1:]):
            # Loop through all vertices in layer
            for vertex in layer.vertices:
                # Create a key for the vertex feeding forward
                net_key = (vertex.routing_key, vertex.routing_mask)

                # Create a net connecting vertex
                # to all vertices in next layer
                net = Net(vertex, next_layer.vertices)

                # Add net to list and associate with key
                nets.append(net)
                net_keys[net] = net_key

        machine_controller = None
        try:
            # Get machine controller from connected SpiNNaker board and boot
            machine_controller = MachineController(spinnaker_hostname)
            machine_controller.boot()

            # Get system info
            system_info = machine_controller.get_system_info()
            logger.debug("Found %u chip machine", len(system_info))

            # Place-and-route
            logger.info("Placing and routing")
            placements, allocations, run_app_map, routing_tables =\
                place_and_route_wrapper(self._vertex_resources,
                                        self._vertex_applications,
                                        nets, net_keys, system_info)

            # Convert placement values to a set to get unique list of chips
            unique_chips = set(itervalues(placements))
            logger.info("Placed on %u cores (%u chips)",
                        len(placements), len(unique_chips))
            logger.debug(list(itervalues(placements)))

            # If the software watchdog is disabled, write zero to each placed
            # chip's SV struct; otherwise write the default value from the SV
            # struct definition
            wdog = (0 if disable_software_watchdog else
                    machine_controller.structs["sv"]["soft_wdog"].default)
            for x, y in unique_chips:
                logger.debug("Setting software watchdog to %u for chip %u, %u",
                            wdog, x, y)
                machine_controller.write_struct_field("sv", "soft_wdog",
                                                    wdog, x, y)

            logger.info("Loading layers")
            for i, l in enumerate(self._layers):
                logger.info("\tLayer %u", i)
                l.load(placements, allocations, machine_controller, z_mask)

            # Load routing tables and applications
            logger.info("Loading routing tables")
            machine_controller.load_routing_tables(routing_tables)

            logger.info("Loading applications")
            machine_controller.load_application(run_app_map)

            # Wait for all cores to hit SYNC0
            logger.info("Waiting for synch")
            num_verts = len(self._vertex_resources)
            self._wait_for_transition(placements, allocations,
                                      machine_controller,
                                      AppState.init, AppState.sync0,
                                      num_verts)

            # Send SYNC0 to release the cores from the synchronisation barrier
            # and start the simulation
            machine_controller.send_signal("sync0")

            # Wait for simulation to complete
            logger.info("Simulating")
            time.sleep(float(self._timer_period_us * self._sim_ticks) / 1000000.0)

            # Wait for all cores to exit
            logger.info("Waiting for exit")
            self._wait_for_transition(placements, allocations,
                                      machine_controller,
                                      AppState.run, AppState.exit,
                                      num_verts)

            logger.info("Reading stats")
            for i, l in enumerate(self._layers):
                stats = l.read_statistics()
                logger.info("\tLayer %u", i)
                logger.info("\t\tInput buffer overflows:%u",
                            np.sum(stats["input_buffer_overflows"]))
                logger.info("\t\tTask queue overflows:%u",
                            np.sum(stats["task_queue_full"]))
                logger.info("\t\tTimer event overruns:%u",
                            np.sum(stats["timer_event_overflows"]))
                logger.info("\t\tSpikes emitted:%u",
                            np.sum(stats["spikes_emitted"]))
                logger.info("\t\tSpikes convolved:%u",
                            np.sum(stats["spikes_convolved"]))

            if self._num_profile_samples is not None:
                logger.info("Reading profiling data")

                timestep_ms = self._timer_period_us / 1000.0
                duration_ms = timestep_ms * self._sim_ticks

                for i, l in enumerate(self._layers):
                    profiling_data = l.read_profile()[0][1]
                    logger.info("\tLayer %u", i)
                    #print profiling_data
                    profiling.print_summary(profiling_data, duration_ms,
                                            timestep_ms)


            #logger.info("Downloading spikes")
            # Save off layer data
            for i, l in enumerate(self._layers):
                spikes = l.read_recorded_spikes()
                np.save("layer_%u.npy" % i, spikes)

        finally:
            if machine_controller is not None:
                logger.info("Stopping SpiNNaker application")
                machine_controller.send_signal("stop")

    # ------------------------------------------------------------------------
    # Private methods
    # ------------------------------------------------------------------------
    def _wait_for_transition(self, placements, allocations, machine_controller,
                             from_state, to_state,
                             num_verts, timeout=5.0):
        while True:
            # If no cores are still in from_state, stop
            if machine_controller.count_cores_in_state(from_state) == 0:
                break

            # Wait a bit
            time.sleep(1.0)

        # Wait for all cores to reach to_state
        cores_in_to_state =\
            machine_controller.wait_for_cores_to_reach_state(
                to_state, num_verts, timeout=timeout)
        if cores_in_to_state != num_verts:
            # Loop through all placed vertices
            for vertex, (x, y) in iteritems(placements):
                p = allocations[vertex][machine.Cores].start
                status = machine_controller.get_processor_status(p, x, y)
                if status.cpu_state is not to_state:
                    print("Core ({}, {}, {}) in state {!s}".format(
                        x, y, p, status))
                    print(machine_controller.get_iobuf(p, x, y))
            raise Exception("Unexpected core failures "
                            "before reaching %s state (%u/%u)." % (to_state, cores_in_to_state, num_verts))
    def __call__(self, application_graph, graph_mapper, machine_graph,
                 n_keys_map):
        """
        :param application_graph: The application graph
        :param graph_mapper: the mapping between graphs
        :param machine_graph: the machine graph
        :param n_keys_map: the mapping between edges and n keys
        :return: routing information objects
        """
        progress_bar = ProgressBar(
            machine_graph.n_outgoing_edge_partitions * 3,
            "Allocating routing keys")

        # ensure groups are stable and correct
        self._determine_groups(
            machine_graph, graph_mapper, application_graph, n_keys_map,
            progress_bar)

        # define the key space
        bit_field_space = BitField(32)
        field_positions = set()

        # determine how many distinct types of constraint are present
        seen_fields = deduce_types(machine_graph)
        progress_bar.update(machine_graph.n_outgoing_edge_partitions)

        if len(seen_fields) > 1:
            self._adds_application_field_to_the_fields(seen_fields)

        # handle the application space
        self._create_application_space_in_the_bit_field_space(
            bit_field_space, seen_fields, field_positions)

        # assign fields to positions in the space
        bit_field_space.assign_fields()

        # get positions of the flexible fields:
        self._assign_flexi_field_positions(
            bit_field_space, seen_fields, field_positions)

        # create routing_info_allocator
        routing_info = RoutingInfo()
        seen_mask_instances = 0

        # extract keys and masks for each edge from the bitfield
        for partition in machine_graph.outgoing_edge_partitions:
            # get keys and masks
            keys_and_masks, seen_mask_instances = \
                self._extract_keys_and_masks_from_bit_field(
                    partition, bit_field_space, n_keys_map,
                    seen_mask_instances)

            # update routing info for each edge in the partition
            partition_info = PartitionRoutingInfo(keys_and_masks, partition)
            routing_info.add_partition_info(partition_info)

            # update the progress bar again
            progress_bar.update()
        progress_bar.end()

        return routing_info, field_positions