def test_run_too_long():
    s.setup(model_binary_folder=os.path.dirname(__file__))
    s.add_machine_vertex_instance(
        TestRunVertex("test_run_too_long.aplx",
                      ExecutableType.USES_SIMULATION_INTERFACE))
    with pytest.raises(SpinnmanTimeoutException):
        s.run(1000)
    def run(self, mbs, x, y):

        # setup system
        sim.setup(model_binary_module=(
            speed_tracker_with_protocol_search_c_code_version))

        # build verts
        reader = SDRAMReaderAndTransmitterWithProtocol(mbs)
        reader.add_constraint(ChipAndCoreConstraint(x=x, y=y))
        receiver = PacketGathererWithProtocol()

        # add verts to graph
        sim.add_machine_vertex_instance(reader)
        sim.add_machine_vertex_instance(receiver)

        # build and add edge to graph
        sim.add_machine_edge_instance(MachineEdge(reader, receiver),
                                      "TRANSMIT")

        machine = sim.machine()
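        # the requested chip may be absent (a dead chip, or a smaller
        # machine than expected), so only run when it is present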
        if machine.is_chip_at(x, y):
            return self._do_run(reader, receiver, mbs)
        else:
            sim.stop()
            return None, False, False, "", 0
def test_rte_at_start():
    globals_variables.unset_simulator()
    s.setup(model_binary_folder=os.path.dirname(__file__))
    s.add_machine_vertex_instance(
        RunVertex("test_rte_start.aplx",
                  ExecutableType.USES_SIMULATION_INTERFACE))
    with pytest.raises(SpinnmanException):
        s.run(1000)
Example #4
    def run(self, mbs):

        # setup system
        sim.setup(model_binary_module=test_extra_monitor_core_data_extraction,
                  n_chips_required=2)

        # build verts
        writer = SDRAMWriter(mbs)

        # add verts to graph
        sim.add_machine_vertex_instance(writer)

        sim.run(12)

        # get placements for extraction
        placements = sim.placements()
        machine = sim.machine()

        writer_placement = placements.get_placement_of_vertex(writer)
        writer_chip = \
            machine.get_chip_at(writer_placement.x, writer_placement.y)
        writer_nearest_ethernet = machine.get_chip_at(
            writer_chip.nearest_ethernet_x, writer_chip.nearest_ethernet_y)

        extra_monitor_vertices = sim.globals_variables.\
            get_simulator()._last_run_outputs['MemoryExtraMonitorVertices']
        extra_monitor_gatherers = sim.globals_variables.\
            get_simulator()._last_run_outputs[
                'MemoryMCGatherVertexToEthernetConnectedChipMapping']

        receiver = None
        gatherer = extra_monitor_gatherers[(writer_nearest_ethernet.x,
                                            writer_nearest_ethernet.y)]
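        # find the extra-monitor vertex placed on the same chip as the
        # writer; this is the core the gatherer streams the data from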
        for vertex in extra_monitor_vertices:
            placement = placements.get_placement_of_vertex(vertex)
            if (placement.x == writer_placement.x
                    and placement.y == writer_placement.y):
                receiver = vertex

        start = float(time.time())

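        # switch the extra monitors into the faster data-extraction
        # protocol for the duration of the read (undone again below)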
        gatherer.set_cores_for_data_extraction(sim.transceiver(),
                                               extra_monitor_vertices,
                                               placements)
        data = gatherer.get_data(
            sim.transceiver(), placements.get_placement_of_vertex(receiver),
            self._get_data_region_address(sim.transceiver(), writer_placement),
            writer.mbs_in_bytes)
        gatherer.unset_cores_for_data_extraction(sim.transceiver(),
                                                 extra_monitor_vertices,
                                                 placements)
        end = float(time.time())

        print("time taken to extract {} MB is {}. MBS of {}".format(
            mbs, end - start, (mbs * 8) / (end - start)))

        self._check_data(data)
def test_rte_during_run_forever():
    s.setup(model_binary_folder=os.path.dirname(__file__))
    s.add_machine_vertex_instance(
        TestRunVertex("test_rte_during_run.aplx",
                      ExecutableType.USES_SIMULATION_INTERFACE))
    s.run(None)
    sleep(2.0)
    with pytest.raises(ExecutableFailedToStopException):
        s.stop()
Example #6
def test_rte_during_run():
    s.setup(model_binary_folder=os.path.dirname(__file__))
    s.add_machine_vertex_instance(
        RunVertex("test_rte_during_run.aplx",
                  ExecutableType.USES_SIMULATION_INTERFACE))
    with pytest.raises(SpinnmanException):
        try:
            s.run(1000)
        except Exception:
            traceback.print_exc()
            raise
Example #7
def test_rte_during_run_forever():
    globals_variables.unset_simulator()
    s.setup(model_binary_folder=os.path.dirname(__file__))
    s.add_machine_vertex_instance(
        RunVertex("test_rte_during_run.aplx",
                  ExecutableType.USES_SIMULATION_INTERFACE))
    s.add_socket_address(None, "localhost", conn.local_port)
    s.run(None)
    conn.close()
    with pytest.raises(ExecutableFailedToStopException):
        s.stop()
Example #8
    def test_hello_world(self):
        front_end.setup(n_chips_required=1,
                        model_binary_folder=os.path.dirname(__file__))

        # Put HelloWorldVertex onto 16 cores
        total_number_of_cores = 16
        for x in range(total_number_of_cores):
            front_end.add_machine_vertex_instance(
                HelloWorldVertex(n_hellos=10, label=f"Hello World at {x}"))

        front_end.run(10)
        front_end.run(10)

        front_end.stop()
def test_rte_during_run_forever():
    def start():
        sleep(3.0)
        s.stop_run()

    conn = DatabaseConnection(start, local_port=None)
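    # DatabaseConnection will call start() once the run has begun;
    # start() waits 3 seconds and then requests a stop, by which time
    # the RTE is expected to have occurred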
    s.setup(model_binary_folder=os.path.dirname(__file__))
    s.add_machine_vertex_instance(
        RunVertex("test_rte_during_run.aplx",
                  ExecutableType.USES_SIMULATION_INTERFACE))
    s.add_socket_address(None, "localhost", conn.local_port)
    s.run(None)
    with pytest.raises(ExecutableFailedToStopException):
        s.stop()
    conn.close()
Example #10
    def check_extra_monitor(self):
        mbs = _TRANSFER_SIZE_MEGABYTES

        # setup system
        globals_variables.unset_simulator()
        sim.setup(model_binary_folder=os.path.dirname(__file__),
                  n_chips_required=2)

        # build verts
        writer_vertex = SDRAMWriter(mbs)

        # add verts to graph
        sim.add_machine_vertex_instance(writer_vertex)
        sim.run(12)

        writer_placement = sim.placements().get_placement_of_vertex(
            writer_vertex)

        # pylint: disable=protected-access
        outputs = sim.globals_variables.get_simulator()._last_run_outputs
        monitor_vertices = outputs[_MONITOR_VERTICES]

        receiver_plt = _get_monitor_placement(monitor_vertices,
                                              writer_placement)
        gatherers, gatherer = _get_gatherer_for_monitor(writer_vertex)

        start = float(time.time())

        data = _do_transfer(gatherer, gatherers, monitor_vertices,
                            receiver_plt, writer_placement, writer_vertex)

        end = float(time.time())

        print(
            "time taken to extract {} MB is {}. Transfer rate: {} Mb/s".format(
                mbs, end - start, (mbs * 8) / (end - start)))

        check_data(data)

        sim.stop()
    def check_extra_monitor(self):
        mbs = _TRANSFER_SIZE_MEGABYTES

        # setup system
        sim.setup(model_binary_folder=os.path.dirname(__file__),
                  n_chips_required=2)

        # build verts
        writer_vertex = SDRAMWriter(mbs)

        # add verts to graph
        sim.add_machine_vertex_instance(writer_vertex)
        sim.run(12)

        writer_placement = sim.placements().get_placement_of_vertex(
            writer_vertex)

        # pylint: disable=protected-access
        monitor_vertices = sim.globals_variables.get_simulator().\
            _extra_monitor_to_chip_mapping

        receiver_plt = _get_monitor_placement(monitor_vertices,
                                              writer_placement)
        gatherers, gatherer = _get_gatherer_for_monitor(writer_vertex)

        start = float(time.time())

        data = _do_transfer(gatherer, gatherers, monitor_vertices,
                            receiver_plt, writer_placement, writer_vertex)

        end = float(time.time())

        print(f"time taken to extract {mbs} MB is {end - start}. "
              f"Transfer rate: {(mbs * 8) / (end - start)} Mb/s")

        check_data(data)

        sim.stop()
import spinnaker_graph_front_end as front_end
from pacman.model.constraints.placer_constraints import ChipAndCoreConstraint

from spinnaker_graph_front_end.examples.hello_world.hello_world_vertex\
    import HelloWorldVertex

import logging
import os

from spinnaker_graph_front_end.examples.test_fixed_router.\
    hello_world_vertex_clone import HelloWorldVertexClone

logger = logging.getLogger(__name__)

front_end.setup(n_chips_required=None, model_binary_folder=os.getcwd())
front_end.globals_variables.get_simulator().update_extra_mapping_inputs(
    {"FixedRouteDestinationClass": HelloWorldVertexClone})

# fill all cores with a HelloWorldVertex each
front_end.add_machine_vertex_instance(
    HelloWorldVertex(label="transmitter",
                     constraints=[ChipAndCoreConstraint(x=1, y=1)]))
front_end.add_machine_vertex_instance(
    HelloWorldVertexClone(label="the clone!",
                          constraints=[ChipAndCoreConstraint(x=0, y=0)]))

front_end.run(10)

front_end.stop()
    def run(self, mbs, number_of_repeats):

        # setup system
        sim.setup(model_binary_module=(
            test_extra_monitor_core_data_extraction_multiple_locations),
                  n_chips_required=49 * 2)

        # build vertices
        locs = [(0, 0), (2, 2), (7, 7), (3, 0), (1, 0), (0, 1), (3, 3), (4, 4),
                (5, 5), (3, 5), (4, 0), (7, 4), (8, 4), (4, 8), (11, 11),
                (11, 0), (0, 11), (6, 3), (0, 6)]
        writers = list()

        for chip_x, chip_y in locs:
            writer = SDRAMWriter(mbs,
                                 constraint=ChipAndCoreConstraint(
                                     chip_x, chip_y))
            # add vertices to graph
            sim.add_machine_vertex_instance(writer)
            writers.append(writer)

        sim.run(12)

        # get placements for extraction
        placements = sim.placements()
        machine = sim.machine()

        extra_monitor_vertices = sim.globals_variables. \
            get_simulator()._last_run_outputs[
                'MemoryExtraMonitorVertices']
        extra_monitor_gatherers = sim.globals_variables. \
            get_simulator()._last_run_outputs[
                'MemoryMCGatherVertexToEthernetConnectedChipMapping']
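        # a single gatherer (here the one on the root Ethernet chip at
        # (0, 0)) is used to switch every extra monitor in and out of
        # extraction mode in one call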
        time_out_setter = extra_monitor_gatherers[(0, 0)]

        time_out_setter.set_cores_for_data_extraction(sim.transceiver(),
                                                      extra_monitor_vertices,
                                                      placements)

        for _ in range(0, number_of_repeats):
            for writer in writers:

                writer_placement = placements.get_placement_of_vertex(writer)
                writer_chip = \
                    machine.get_chip_at(writer_placement.x, writer_placement.y)
                writer_nearest_ethernet = machine.get_chip_at(
                    writer_chip.nearest_ethernet_x,
                    writer_chip.nearest_ethernet_y)

                receiver = None
                gatherer = extra_monitor_gatherers[(writer_nearest_ethernet.x,
                                                    writer_nearest_ethernet.y)]
                for vertex in extra_monitor_vertices:
                    placement = placements.get_placement_of_vertex(vertex)
                    if (placement.x == writer_placement.x
                            and placement.y == writer_placement.y):
                        receiver = vertex

                start = float(time.time())
                data = gatherer.get_data(
                    sim.transceiver(),
                    placements.get_placement_of_vertex(receiver),
                    self._get_data_region_address(sim.transceiver(),
                                                  writer_placement),
                    writer.mbs_in_bytes)
                end = float(time.time())

                print("time taken to extract {} MB is {}. MBS of {}".format(
                    mbs, end - start, (mbs * 8) / (end - start)))

                self._check_data(data)

        time_out_setter.unset_cores_for_data_extraction(
            sim.transceiver(), extra_monitor_vertices, placements)
import spinnaker_graph_front_end as sim

from spinnaker_graph_front_end.examples import speed_test_solo

# data to write
from spinnaker_graph_front_end.examples.speed_test_solo.\
    packet_gatherer_cheat import PacketGathererCheat

mbs = 1.0

# setup system
sim.setup(model_binary_module=speed_test_solo)

# build verts
receiver = PacketGathererCheat(mbs, 1)

# add verts to graph
sim.add_machine_vertex_instance(receiver)

# run forever (to allow better speed testing)
sim.run()

# get placements for extraction
placements = sim.placements()

# try getting data via mc transmission
start = None
end = None
data = None

sim.transceiver().set_watch_dog(False)

try:
def run_model(data, n_chips=None, n_ihcan=0, fs=44100, resample_factor=1):

    # Set up the simulation
    g.setup(n_chips_required=n_chips, model_binary_module=model_binaries)

    # Get the number of cores available for use
    n_cores = 0
    machine = g.machine()

    # Create a OME for each chip
    boards = dict()

    # changed to lists to ensure data is read back in the same order
    # that vertices are instantiated
    ihcans = list()

    cf_index = 0
    count = 0
    for chip in machine.chips:
        if count >= n_chips:
            break
        else:
            boards[chip.x, chip.y] = chip.ip_address

            for j in range(n_ihcan):
                ihcan = IHCANVertex(data[j][:], fs, resample_factor)
                g.add_machine_vertex_instance(ihcan)
                # constrain placement to local chip
                ihcan.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))
                #ihcans[chip.x, chip.y,j] = ihcan
                ihcans.append(ihcan)

            count = count + 1

    # Run the simulation
    g.run(None)

    # Wait for the application to finish
    txrx = g.transceiver()
    app_id = globals_variables.get_simulator()._app_id
    #logger.info("Running {} worker cores".format(n_workers))
    logger.info("Waiting for application to finish...")
    running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
    while running > 0:
        time.sleep(0.5)
        error = txrx.get_core_state_count(app_id, CPUState.RUN_TIME_EXCEPTION)
        watchdog = txrx.get_core_state_count(app_id, CPUState.WATCHDOG)
        if error > 0 or watchdog > 0:
            error_msg = "Some cores have failed ({} RTE, {} WDOG)".format(
                error, watchdog)
            raise Exception(error_msg)
        running = txrx.get_core_state_count(app_id, CPUState.RUNNING)

    # Get the data back
    samples = list()
    progress = ProgressBar(len(ihcans), "Reading results")

    for ihcan in ihcans:
        samples.append(ihcan.read_samples(g.buffer_manager()))
        progress.update()
    progress.end()
    samples = numpy.hstack(samples)

    # Close the machine
    g.stop()

    print "channels running: ", len(ihcans) / 5.0
    print "output data: {} fibres with length {}".format(
        len(ihcans) * 2, len(samples))
    #if(len(samples) != len(ihcans)*2*numpy.floor(len(data[0][0])/100)*100*(1.0/resample_factor)):
    if (len(samples) !=
            len(ihcans) * 2 * numpy.floor(len(data[0][0]) / 96) * 96):
        #print "samples length {} isn't expected size {}".format(len(samples),len(ihcans)*2*numpy.floor(len(data[0][0])/100)*100*(1.0/resample_factor))
        print "samples length {} isn't expected size {}".format(
            len(samples),
            len(ihcans) * 2 * numpy.floor(len(data[0][0]) / 96) * 96)

    return samples
Example #16
def add_machine_vertex_instance(machine_vertex):
    front_end.add_machine_vertex_instance(machine_vertex)
if cores <= (MAX_X_SIZE_OF_FABRIC * MAX_Y_SIZE_OF_FABRIC):
    raise KeyError("Don't have enough cores to run simulation")

# contain the vertices for the connection aspect
vertices = [[None for _ in range(MAX_X_SIZE_OF_FABRIC)]
            for _ in range(MAX_Y_SIZE_OF_FABRIC)]

active_states = [(2, 2), (3, 2), (3, 3), (4, 3), (2, 4)]

# build vertices
for x in range(0, MAX_X_SIZE_OF_FABRIC):
    for y in range(0, MAX_Y_SIZE_OF_FABRIC):
        vert = ConwayBasicCell("cell{}".format((x * MAX_X_SIZE_OF_FABRIC) + y),
                               (x, y) in active_states)
        vertices[x][y] = vert
        front_end.add_machine_vertex_instance(vert)

# verify the initial state
output = ""
for y in range(MAX_Y_SIZE_OF_FABRIC - 1, -1, -1):
    for x in range(0, MAX_X_SIZE_OF_FABRIC):
        if vertices[x][y].state:
            output += "X"
        else:
            output += " "
    output += "\n"
print(output)
print("\n\n")

# build edges
for x in range(0, MAX_X_SIZE_OF_FABRIC):
Example #18
front_end.setup(n_chips_required=n_chips_required,
                model_binary_module=cbin,
                machine_time_step=machine_time_step)

# VERTICES
particle_list = list()
filter_list = list()

# create "input"
# running with test data use this vertex
if use_spinn_link:
    # when running on the icub we'll need this vertex
    input_vertex = ICUBInputVertex(spinnaker_link_id=spinnaker_link_used,
                                   board_address=None,
                                   label="Input Vertex")
    front_end.add_machine_vertex_instance(input_vertex)

else:
    input_vertex = ReverseIPTagMulticastSourceMachineVertex(
        virtual_key=constants.RETINA_BASE_KEY,
        buffer_notification_ip_address="0.0.0.0",
        n_keys=1048576,
        send_buffer_max_space=20 * 1024 * 1024,
        label="Input Vertex",
        send_buffer_times=spike_train)
    front_end.add_machine_vertex_instance(input_vertex)

# create "output"
output_vertex = ICUBOutputVertex(spinnaker_link_id=spinnaker_link_used,
                                 board_address=None,
                                 label="Output Vertex")
"Hello World from $chip.x, $chip.y, $core"

We then fetch the written data and print it on the Python console.
"""

import os
import spinnaker_graph_front_end as front_end
from gfe_examples.hello_world.hello_world_vertex import HelloWorldVertex

front_end.setup(n_chips_required=1,
                model_binary_folder=os.path.dirname(__file__))

# Put HelloWorldVertex onto 16 cores
total_number_of_cores = 16
for x in range(total_number_of_cores):
    front_end.add_machine_vertex_instance(
        HelloWorldVertex(n_hellos=10, label=f"Hello World at {x}"))

front_end.run(10)
front_end.run(10)

if not front_end.use_virtual_machine():
    for placement in sorted(front_end.placements().placements,
                            key=lambda p: (p.x, p.y, p.p)):
        if isinstance(placement.vertex, HelloWorldVertex):
            hello_world = placement.vertex.read()
            print(
                f"{placement.x}, {placement.y}, {placement.p} > {hello_world}")

front_end.stop()
Example #20
import logging
import os
import spinnaker_graph_front_end as front_end
from spinnaker_graph_front_end.examples.hello_world.hello_world_vertex import (
    HelloWorldVertex)

logger = logging.getLogger(__name__)

front_end.setup(
    n_chips_required=1, model_binary_folder=os.path.dirname(__file__))

# Put HelloWorldVertex onto 16 cores
total_number_of_cores = 16
for x in range(total_number_of_cores):
    front_end.add_machine_vertex_instance(
        HelloWorldVertex(n_hellos=10, label="Hello World at {}".format(x)))

front_end.run(10)
front_end.run(10)

placements = front_end.placements()

if not front_end.use_virtual_machine():
    buffer_manager = front_end.buffer_manager()
    for placement in sorted(placements.placements,
                            key=lambda p: (p.x, p.y, p.p)):

        if isinstance(placement.vertex, HelloWorldVertex):
            hello_world = placement.vertex.read(placement, buffer_manager)
            logger.info("{}, {}, {} > {}".format(
                placement.x, placement.y, placement.p, hello_world))
    def __init__(self, network, group, subgroup):

        max_links = MLPConstants.MAX_S_CORE_LINKS

        # total number of Sum Vertices needed to build the tree
        num_vrt = ((network.subgroups - 2) // (max_links - 1)) + 1
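        # derivation: each vertex provides max_links input slots and each
        # non-root vertex consumes one slot on its parent, a net gain of
        # (max_links - 1) slots per vertex, hence
        # ceil((subgroups - 1) / (max_links - 1)) vertices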

        # the root vertex is used as pre-vertex for outgoing links
        self._root = SumVertex(network, group, subgroup, 0)

        # add the root to the graph
        gfe.add_machine_vertex_instance(self.root)

        # and to the list of all tree vertices
        self._vertices = [self.root]

        # create the SumVertex tree
        free_links = max_links
        to_vrt = 0
        for vrt in range(1, num_vrt):
            # create a SumVertex
            vt = SumVertex(network, group, subgroup, vrt)

            # add it to the list of vertices
            self._vertices.append(vt)

            # add it to the graph
            gfe.add_machine_vertex_instance(vt)

            # add all SumVertex links towards the tree root
            gfe.add_machine_edge_instance(
                MachineEdge(vt, self.vertices[to_vrt]), vt.fwd_link)

            gfe.add_machine_edge_instance(
                MachineEdge(vt, self.vertices[to_vrt]), vt.bkp_link)

            gfe.add_machine_edge_instance(
                MachineEdge(vt, self.vertices[to_vrt]), vt.lds_link)

            gfe.add_machine_edge_instance(
                MachineEdge(vt, self.vertices[to_vrt]), vt.fsg_link)

            # take away one free link from vertex to_vrt
            free_links -= 1

            # if out of free links use next available vertex
            if free_links == 0:
                free_links = max_links
                to_vrt += 1

        # finally, map every pre-vertex to an available tree vertex
        self._leaf_map = {}
        for grp in network.groups:
            for sgrp in range(grp.subgroups):
                # assign available leaf vertex
                self._leaf_map[(grp.id, sgrp)] = self.vertices[to_vrt]

                # take away one free link from vertex to_vrt
                free_links -= 1

                # if out of free links use next available vertex
                if free_links == 0:
                    free_links = max_links
                    to_vrt += 1
import logging
import os

from spinn_utilities.log import FormatAdapter
import spinnaker_graph_front_end as front_end
from gfe_examples.hello_world_untimed.hello_world_vertex import (
    HelloWorldVertex)

logger = FormatAdapter(logging.getLogger(__name__))

front_end.setup(n_chips_required=1,
                model_binary_folder=os.path.dirname(__file__))

# Put HelloWorldVertex onto 16 cores
total_number_of_cores = 16
prints_per_run = 10
runs = 2
for x in range(total_number_of_cores):
    front_end.add_machine_vertex_instance(
        HelloWorldVertex(label=f"Hello World {x}"))

for _ in range(runs):
    front_end.run_until_complete(prints_per_run)

if not front_end.use_virtual_machine():
    for placement in sorted(front_end.placements().placements,
                            key=lambda p: (p.x, p.y, p.p)):

        if isinstance(placement.vertex, HelloWorldVertex):
            hello_world = placement.vertex.read()
            logger.info("{}, {}, {} > {}", placement.x, placement.y,
                        placement.p, hello_world)

front_end.stop()
Example #23
def run_mcmc(model,
             data,
             n_samples,
             burn_in=2000,
             thinning=5,
             degrees_of_freedom=3.0,
             seed=None,
             n_chips=None,
             n_boards=None):
    """ Executes an MCMC model, returning the received samples

    :param model: The MCMCModel to be used
    :param data: The data to sample
    :param n_samples: The number of samples to generate
    :param burn_in:\
        no of MCMC transitions to reach apparent equilibrium before\
        generating inference samples
    :param thinning:\
        sampling rate i.e. 5 = 1 sample for 5 generated steps
    :param degrees_of_freedom:\
        The number of degrees of freedom to jump around with
    :param seed: The random seed to use
    :param n_chips: The number of chips to run the model on
    :param root_finder: Use the root finder by adding root finder vertices
    :param cholesky: Use the Cholesky algorithm by adding Cholesky vertices

    :return: The samples read
    :rtype: A numpy array with fields for each model state variable
    """

    # Set up the simulation
    g.setup(n_boards_required=n_boards,
            n_chips_required=n_chips,
            model_binary_module=model_binaries)

    # Get the number of cores available for use
    n_cores = 0
    machine = g.machine()

    # Create a coordinator for each board
    coordinators = dict()
    boards = dict()
    for chip in machine.ethernet_connected_chips:

        # Create a coordinator
        coordinator = MCMCCoordinatorVertex(model, data, n_samples, burn_in,
                                            thinning, degrees_of_freedom, seed)
        g.add_machine_vertex_instance(coordinator)

        # Put the coordinator on the Ethernet chip
        coordinator.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))
        coordinators[chip.x, chip.y] = coordinator
        boards[chip.x, chip.y] = chip.ip_address

    # Go through all the chips and add the workhorses
    n_chips_on_machine = machine.n_chips
    n_workers = 0
    if (model.root_finder):
        n_root_finders = 0
    if (model.cholesky):
        n_cholesky = 0
    for chip in machine.chips:

        # Count the cores in the processor
        # (-1 if this chip also has a coordinator)
        n_cores = len([p for p in chip.processors if not p.is_monitor])
        if (chip.x, chip.y) in coordinators:
            n_cores -= 3  # coordinator and extra_monitor_support (2)
            if (model.root_finder):
                if (model.cholesky):
                    n_cores = n_cores // 3
                else:
                    n_cores = n_cores // 2
        else:
            n_cores -= 1  # just extra_monitor_support
            if (model.root_finder):
                if (model.cholesky):
                    n_cores = n_cores // 3
                else:
                    n_cores = n_cores // 2

        # Find the coordinator for the board (or 0, 0 if it is missing)
        eth_x = chip.nearest_ethernet_x
        eth_y = chip.nearest_ethernet_y
        coordinator = coordinators.get((eth_x, eth_y))
        if coordinator is None:
            print("Warning - couldn't find {}, {} for chip {}, {}".format(
                eth_x, eth_y, chip.x, chip.y))
            coordinator = coordinators[0, 0]
            print("Using coordinator ", coordinator)

        # hard-code remove some cores (chip power monitor etc.) just
        # to see what happens
#        n_cores -= non_worker_cores_per_chip
#        print 'n_cores: ', n_cores

        # Add a vertex for each core
        for _ in range(n_cores):

            # Create the vertex and add it to the graph
            vertex = MCMCVertex(coordinator, model)
            n_workers += 1
            g.add_machine_vertex_instance(vertex)

            # Put the vertex on the same board as the coordinator
            vertex.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))

            # Add an edge from the coordinator to the vertex, to send the data
            g.add_machine_edge_instance(MachineEdge(coordinator, vertex),
                                        coordinator.data_partition_name)

            # Add an edge from the vertex to the coordinator,
            # to send acknowledgement
            g.add_machine_edge_instance(MachineEdge(vertex, coordinator),
                                        coordinator.acknowledge_partition_name)

            if (model.root_finder):
                # Create a root finder vertex
                rf_vertex = MCMCRootFinderVertex(vertex, model)
                n_root_finders += 1
                g.add_machine_vertex_instance(rf_vertex)

                # put it on the same chip as the standard mcmc vertex?
                # no - put it on a "nearby" chip, however that works
                rf_vertex.add_constraint(ChipAndCoreConstraint(chip.x, chip.y))

                # Add an edge from mcmc vertex to root finder vertex,
                # to "send" the data - need to work this out
                g.add_machine_edge_instance(MachineEdge(vertex, rf_vertex),
                                            vertex.parameter_partition_name)

                # Add edge from root finder vertex back to mcmc vertex
                # to send acknowledgement / result - need to work this out
                g.add_machine_edge_instance(MachineEdge(rf_vertex, vertex),
                                            vertex.result_partition_name)

            if (model.cholesky):
                # Create a Cholesky vertex
                cholesky_vertex = MCMCCholeskyVertex(vertex, model)
                n_cholesky += 1
                g.add_machine_vertex_instance(cholesky_vertex)

                # put it on the same chip as the standard mcmc vertex?
                # no - put it on a "nearby" chip, however that works
                cholesky_vertex.add_constraint(
                    ChipAndCoreConstraint(chip.x, chip.y))

                # Add an edge from mcmc vertex to Cholesky vertex,
                # to "send" the data - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(vertex, cholesky_vertex),
                    vertex.cholesky_partition_name)

                # Add edge from Cholesky vertex back to mcmc vertex
                # to send acknowledgement / result - need to work this out
                g.add_machine_edge_instance(
                    MachineEdge(cholesky_vertex, vertex),
                    vertex.cholesky_result_partition_name)

    start_computing_time = time.time()

    logger.info("n_chips_on_machine {}".format(n_chips_on_machine))
    logger.info("Running {} worker cores".format(n_workers))
    if (model.root_finder):
        logger.info("Running {} root finder cores".format(n_root_finders))
    if (model.cholesky):
        logger.info("Running {} Cholesky cores".format(n_cholesky))

    # Run the simulation
    g.run_until_complete()

    mid_computing_time = time.time()

    # Wait for the application to finish
    txrx = g.transceiver()
    app_id = globals_variables.get_simulator()._app_id
    logger.info("Running {} worker cores".format(n_workers))
    if (model.root_finder):
        logger.info("Running {} root finder cores".format(n_root_finders))
    if (model.cholesky):
        logger.info("Running {} Cholesky cores".format(n_cholesky))
    logger.info("Waiting for application to finish...")
    running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
    # there are now cores doing extra_monitor etc.
    non_worker_cores = n_chips_on_machine + (2 * len(boards))
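    # assumption: one extra-monitor core per chip plus two support cores
    # per Ethernet-connected board remain in RUNNING throughout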
    while running > non_worker_cores:
        time.sleep(0.5)
        error = txrx.get_core_state_count(app_id, CPUState.RUN_TIME_EXCEPTION)
        watchdog = txrx.get_core_state_count(app_id, CPUState.WATCHDOG)
        if error > 0 or watchdog > 0:
            error_msg = "Some cores have failed ({} RTE, {} WDOG)".format(
                error, watchdog)
            raise Exception(error_msg)
        running = txrx.get_core_state_count(app_id, CPUState.RUNNING)
        print('running: ', running)

    finish_computing_time = time.time()

    # Get the data back
    samples = dict()
    for coord, coordinator in coordinators.items():
        samples[coord[0],
                coord[1]] = coordinator.read_samples(g.buffer_manager())

    # Close the machine
    g.stop()

    finish_time = time.time()

    # Note: this timing appears to be incorrect now; needs looking at
    print("Overhead time is %s seconds" % (start_computing_time - start_time))
    print("Computing time is %s seconds" %
          (finish_computing_time - start_computing_time))
    print("run_until_complete takes %s seconds" %
          (mid_computing_time - start_computing_time))
    print("Data collecting time is %s seconds" %
          (finish_time - finish_computing_time))
    print("Overall running time is %s seconds" % (finish_time - start_time))

    return samples
    def generate_machine_graph (self):
        """ generates a machine graph for the application graph
        """
        print "generating machine graph"

        # setup the machine graph
        g.setup ()

        # set the number of write blocks before generating vertices
        self._num_write_blks = len (self.output_chain)

        # create associated weight, sum, input and threshold
        # machine vertices for every network group
        for grp in self.groups:
            # create one weight core per (from_group, group) pair
            # NOTE: all-zero cores can be optimised out
            for from_grp in self.groups:
                wv = WeightVertex (self, grp, from_grp)
                grp.w_vertices.append (wv)
                g.add_machine_vertex_instance (wv)
                self._num_vertices += 1

            # create one sum core per group
            sv = SumVertex (self, grp)
            grp.s_vertex = sv
            g.add_machine_vertex_instance (sv)
            self._num_vertices += 1

            # create one input core per group
            iv = InputVertex (self, grp)
            grp.i_vertex = iv
            g.add_machine_vertex_instance (iv)
            self._num_vertices += 1

            # create one sum core per group
            tv = ThresholdVertex (self, grp)
            grp.t_vertex = tv
            g.add_machine_vertex_instance (tv)
            self._num_vertices += 1

        # create associated forward, backprop, synchronisation and
        # stop machine edges for every network group
        first = self.groups[0]
        for grp in self.groups:
            for w in grp.w_vertices:
                _frmg = w.from_group

                # create forward w to s links
                g.add_machine_edge_instance (MachineEdge (w, grp.s_vertex),
                                             w.fwd_link)

                # create forward t to w (multicast) links
                g.add_machine_edge_instance (MachineEdge (_frmg.t_vertex, w),
                                             _frmg.t_vertex.fwd_link)

                # create backprop w to s links
                g.add_machine_edge_instance (MachineEdge (w, _frmg.s_vertex),
                                             w.bkp_link)

                # create backprop i to w (multicast) links
                g.add_machine_edge_instance (MachineEdge (grp.i_vertex, w),
                                             grp.i_vertex.bkp_link)

                # create forward synchronisation w to t links
                g.add_machine_edge_instance (MachineEdge (w, _frmg.t_vertex),
                                             w.fds_link)

                # create link delta summation w to s links
                g.add_machine_edge_instance (MachineEdge (w, grp.s_vertex),
                                             w.lds_link)

                # create link delta summation result s (first) to w links
                g.add_machine_edge_instance (MachineEdge (first.s_vertex, w),
                                             first.s_vertex.lds_link)

            # create forward s to i link
            g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                      grp.i_vertex),
                                         grp.s_vertex.fwd_link)

            # create backprop s to t link
            g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                      grp.t_vertex),
                                         grp.s_vertex.bkp_link)

            # create forward i to t link
            g.add_machine_edge_instance (MachineEdge (grp.i_vertex,
                                                      grp.t_vertex),
                                         grp.i_vertex.fwd_link)

            # create backprop t to i link
            g.add_machine_edge_instance (MachineEdge (grp.t_vertex,
                                                      grp.i_vertex),
                                         grp.t_vertex.bkp_link)

            # create link delta summation s to s links - all s cores
            # (except the first) send to the first s core
            if grp != first:
                print "Creating lds s-s edge from group {} to group {}".\
                    format (grp.label, first.label)
                g.add_machine_edge_instance (MachineEdge (grp.s_vertex,
                                                          first.s_vertex),
                                             grp.s_vertex.lds_link)

            # create stop links, if OUTPUT group
            if grp in self.output_chain:
                # if last OUTPUT group broadcast stop decision
                if grp == self.output_chain[-1]:
                    for stpg in self.groups:
                        # create stop links to all w cores
                        for w in stpg.w_vertices:
                            g.add_machine_edge_instance\
                              (MachineEdge (grp.t_vertex, w),
                               grp.t_vertex.stp_link)

                        # create stop links to all s cores
                        g.add_machine_edge_instance\
                         (MachineEdge (grp.t_vertex, stpg.s_vertex),\
                          grp.t_vertex.stp_link)

                        # create stop links to all i cores
                        g.add_machine_edge_instance\
                         (MachineEdge (grp.t_vertex, stpg.i_vertex),\
                          grp.t_vertex.stp_link)

                        # create stop links to t cores (no link to itself!)
                        if stpg != grp:
                            g.add_machine_edge_instance\
                             (MachineEdge (grp.t_vertex, stpg.t_vertex),\
                              grp.t_vertex.stp_link)
                else:
                    # create stop link to next OUTPUT group in chain
                    _inx  = self.output_chain.index (grp)
                    _stpg = self.output_chain[_inx + 1]
                    g.add_machine_edge_instance (MachineEdge (grp.t_vertex,
                                                              _stpg.t_vertex),
                                                 grp.t_vertex.stp_link)
Example #25
# Copyright (c) 2017-2019 The University of Manchester
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import spinnaker_graph_front_end as front_end
from spinnaker_graph_front_end.examples.Conways.no_edges_examples.\
    conways_basic_cell import (
        ConwayBasicCell)

# set up the front end and ask for the detected machines dimensions
front_end.setup()

for count in range(0, 60):
    front_end.add_machine_vertex_instance(
        ConwayBasicCell("cell{}".format(count)))

front_end.run(1)
front_end.stop()
Example #26
    def generate_machine_graph (self):
        """ generates a machine graph for the application graph
        """
        print ("generating machine graph")

        # path to binary files
        binaries_path = os.path.join(os.path.dirname(__file__), "..", "binaries")

        # estimate number of SpiNNaker boards required
        # number of subgroups
        for grp in self.groups:
            self.subgroups += grp.subgroups

        # number of required cores
        w_cores = self.subgroups * self.subgroups
        s_cores = self.subgroups * (((self.subgroups - 2) //
                                    (MLPConstants.MAX_S_CORE_LINKS - 1)) + 1)
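        # each subgroup gets a sum-vertex tree of
        # ceil((subgroups - 1) / (MAX_S_CORE_LINKS - 1)) cores
        # (see the SumVertexTree derivation)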
        i_cores = self.subgroups
        t_cores = self.subgroups
        cores = w_cores + s_cores + i_cores + t_cores

        s = '' if cores == 1 else 's'
        print (f"need {cores} SpiNNaker core{s}")

        # number of required chips
        chips = ((cores - 1) // MLPConstants.DEF_SPINN_CORES_PER_CHIP) + 1
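        #NOTE: ((n - 1) // k) + 1 is ceiling division, i.e. ceil(n / k)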

        s = '' if chips == 1 else 's'
        print (f"estimating {chips} SpiNNaker chip{s}")

        # number of required boards
        boards = ((chips - 1) // MLPConstants.DEF_SPINN_CHIPS_PER_BOARD) + 1

        s = '' if boards == 1 else 's'
        print (f"requesting {boards} SpiNNaker board{s}")

        # request a SpiNNaker machine and setup the machine graph
        try:
            gfe.setup (model_binary_folder = binaries_path,
                       n_boards_required = boards
                       )
        except Exception as err:
            print ("\n--------------------------------------------------")
            print (f"error: {err}")
            print ("--------------------------------------------------\n")
            return False

        # create weight, sum, input and threshold
        # machine vertices associated with every subgroup
        for grp in self.groups:
            for sgrp in range (grp.subgroups):
                # create one weight core for every
                # (from_group/from_subgroup, group/subgroup) pair
                #TODO: all-zero cores can be optimised out
                wvs = []
                for from_grp in self.groups:
                    for from_sgrp in range (from_grp.subgroups):
                        wv = WeightVertex (self, grp, sgrp,
                                           from_grp, from_sgrp)
                        gfe.add_machine_vertex_instance (wv)
                        wvs.append (wv)
                grp.w_vertices.append (wvs)

                # create a sum core tree per subgroup
                #NOTE: sum vertices are added during tree building
                svt = SumVertexTree (self, grp, sgrp)
                grp.s_vertex.append (svt)

                # create one input core per subgroup
                iv = InputVertex (self, grp, sgrp)
                grp.i_vertex.append (iv)
                gfe.add_machine_vertex_instance (iv)

                # create one threshold core per subgroup
                tv = ThresholdVertex (self, grp, sgrp)
                grp.t_vertex.append (tv)
                gfe.add_machine_vertex_instance (tv)

        # groups and subgroups with special functions
        first_grp = self.groups[0]
        first_subgroup_svt = first_grp.s_vertex[0]

        last_out_grp = self.output_chain[-1]
        last_out_subgroup_t_vertex = (
            last_out_grp.t_vertex[last_out_grp.subgroups - 1]
            )

        # create associated forward, backprop, link delta summation,
        # criterion, stop and sync machine edges for every subgroup
        for grp in self.groups:
            for sgrp in range (grp.subgroups):
                svt = grp.s_vertex[sgrp]
                iv  = grp.i_vertex[sgrp]
                tv  = grp.t_vertex[sgrp]

                for wv in grp.w_vertices[sgrp]:
                    from_grp  = wv.from_group
                    from_sgrp = wv.from_subgroup

                    from_svt = from_grp.s_vertex[from_sgrp]
                    from_tv  = from_grp.t_vertex[from_sgrp]

                    # sum tree leaf to connect to depends on group/subgroup 
                    svt_leaf      = svt.leaf (from_grp, from_sgrp)
                    from_svt_leaf = from_svt.leaf (grp, sgrp)

                    # forward w to s link
                    gfe.add_machine_edge_instance (
                        MachineEdge (wv, svt_leaf),
                        wv.fwd_link
                        )

                    # forward t to w (multicast) link
                    gfe.add_machine_edge_instance (
                        MachineEdge (from_tv, wv),
                        from_tv.fwd_link
                        )

                    # backprop w to s link
                    gfe.add_machine_edge_instance (
                        MachineEdge (wv, from_svt_leaf),
                        wv.bkp_link
                        )

                    # backprop i to w (multicast) link
                    gfe.add_machine_edge_instance (
                        MachineEdge (iv, wv),
                        iv.bkp_link
                        )

                    # link delta summation w to s link
                    gfe.add_machine_edge_instance (
                        MachineEdge (wv, svt_leaf),
                        wv.lds_link
                        )

                    # link delta result (first group) s to w (multicast) link
                    gfe.add_machine_edge_instance (
                        MachineEdge (first_subgroup_svt.root, wv),
                        first_subgroup_svt.root.lds_link
                        )

                    # stop (last output group/subgroup) t to w (multicast) link
                    gfe.add_machine_edge_instance (
                        MachineEdge (last_out_subgroup_t_vertex, wv),
                        last_out_subgroup_t_vertex.stp_link
                        )

                    # forward sync generation w to s links
                    gfe.add_machine_edge_instance (
                        MachineEdge (wv, svt_leaf),
                        wv.fsg_link
                        )

                # forward s to i link
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, iv),
                    svt.root.fwd_link
                    )

                # forward i to t link
                gfe.add_machine_edge_instance (
                    MachineEdge (iv, tv),
                    iv.fwd_link
                    )

                # backprop t to i link
                gfe.add_machine_edge_instance (
                    MachineEdge (tv, iv),
                    tv.bkp_link
                    )

                # backprop s to t link
                gfe.add_machine_edge_instance (
                    MachineEdge (svt.root, tv),
                    svt.root.bkp_link
                    )

                # link delta summation s to s link
                if sgrp != 0:
                    # first subgroup collects from all other subgroups
                    gfe.add_machine_edge_instance (
                        MachineEdge (
                            svt.root,
                            grp.s_vertex[0].root
                            ),
                        svt.root.lds_link
                        )
                elif grp != first_grp:
                    # first group collects from all other groups
                    gfe.add_machine_edge_instance (
                        MachineEdge (
                            svt.root,
                            first_subgroup_svt.root
                            ),
                        svt.root.lds_link
                        )

                # t to t criterion link 
                # intra-group criterion link to last subgroup t
                if sgrp < (grp.subgroups - 1):
                    gfe.add_machine_edge_instance (
                        MachineEdge (tv, grp.t_vertex[grp.subgroups - 1]),
                        tv.stp_link
                        )
                elif grp != last_out_grp:
                    # inter-group criterion link to last output subgroup
                    gfe.add_machine_edge_instance (
                        MachineEdge (tv, last_out_subgroup_t_vertex),
                        tv.stp_link
                        )

                # stop (last output group/subgroup) t to s (multicast) link
                for s in svt.vertices:
                    gfe.add_machine_edge_instance (
                        MachineEdge (last_out_subgroup_t_vertex, s),
                        last_out_subgroup_t_vertex.stp_link
                        )

                # stop (last output group/subgroup) t to i (multicast) link
                gfe.add_machine_edge_instance (
                    MachineEdge (last_out_subgroup_t_vertex, iv),
                    last_out_subgroup_t_vertex.stp_link
                    )

                # stop (last output group/subgroup) t to t (multicast) link
                if tv != last_out_subgroup_t_vertex:
                    gfe.add_machine_edge_instance (
                        MachineEdge (last_out_subgroup_t_vertex, tv),
                        last_out_subgroup_t_vertex.stp_link
                        )

                # forward sync generation s to s links
                #NOTE: s cores that are tree internal nodes not involved
                if sgrp != 0:
                    # first subgroup collects from all other subgroups
                    gfe.add_machine_edge_instance (
                        MachineEdge (
                            svt.root,
                            grp.s_vertex[0].root
                            ),
                        svt.root.fsg_link
                        )
                elif grp != first_grp:
                    # first group collects from all other groups
                    gfe.add_machine_edge_instance (
                        MachineEdge (
                            svt.root,
                            first_subgroup_svt.root
                            ),
                        svt.root.fsg_link
                        )

        # forward sync generation first s to last t link
        gfe.add_machine_edge_instance (
            MachineEdge (first_subgroup_svt.root, last_out_subgroup_t_vertex),
            first_subgroup_svt.root.fsg_link
            )

        self._graph_rdy = True

        return True
Example #27
import os

import spinnaker_graph_front_end as gfe

from pacman.model.graphs.machine import MachineEdge

from pkt_injector_vertex import Pkt_Injector_Vertex
from pkt_extractor_vertex import Pkt_Extractor_Vertex

NUM_INJECTORS = 9

gfe.setup(machine_time_step=1000000,
          n_chips_required=1,
          model_binary_folder=os.path.dirname(__file__))

# instantiate injector vertices
injectors = []
for i in range(NUM_INJECTORS):
    iv = Pkt_Injector_Vertex(i)
    gfe.add_machine_vertex_instance(iv)
    injectors.append(iv)

# instantiate extractor vertices
ev = Pkt_Extractor_Vertex()
gfe.add_machine_vertex_instance(ev)

# create links from injectors to extractor
for iv in injectors:
    gfe.add_machine_edge_instance(MachineEdge(iv, ev), iv.inj_lnk)

gfe.run(10000)

gfe.stop()
Example #28
import logging
import os
import spinnaker_graph_front_end as front_end
from spinnaker_graph_front_end.examples.hello_world_untimed\
    .hello_world_vertex import HelloWorldVertex

logger = logging.getLogger(__name__)

front_end.setup(n_chips_required=1,
                model_binary_folder=os.path.dirname(__file__))

# Put HelloWorldVertex onto 16 cores
total_number_of_cores = 16
prints_per_run = 10
runs = 2
for x in range(total_number_of_cores):
    front_end.add_machine_vertex_instance(
        HelloWorldVertex(label="Hello World {}".format(x)))

for _ in range(runs):
    front_end.run_until_complete(prints_per_run)

placements = front_end.placements()

if not front_end.use_virtual_machine():
    buffer_manager = front_end.buffer_manager()
    for placement in sorted(placements.placements,
                            key=lambda p: (p.x, p.y, p.p)):

        if isinstance(placement.vertex, HelloWorldVertex):
            hello_world = placement.vertex.read(placement, buffer_manager)
            logger.info("{}, {}, {} > {}".format(placement.x, placement.y,
                                                 placement.p, hello_world))