def _get_gatherer_for_monitor(monitor):
    """ Locate the data-speed-up gatherer that serves a monitor vertex.

    :param monitor: the vertex whose chip determines the gatherer
    :return: tuple of (mapping of all gatherers, the gatherer on the
        monitor's nearest Ethernet chip)
    """
    monitor_placement = sim.placements().get_placement_of_vertex(monitor)
    monitor_chip = sim.machine().get_chip_at(
        monitor_placement.x, monitor_placement.y)
    simulator = sim.globals_variables.get_simulator()
    # pylint: disable=protected-access
    gatherer_map = simulator._vertex_to_ethernet_connected_chip_mapping
    ethernet_coords = (monitor_chip.nearest_ethernet_x,
                       monitor_chip.nearest_ethernet_y)
    return gatherer_map, gatherer_map[ethernet_coords]
def _get_monitor_placement(monitor_vertices, placement):
    """ Get the receiver placement on the same chip as a given placement

    :param monitor_vertices: mapping whose values are the extra monitor
        vertices to search
    :param placement: the placement whose chip must host the monitor
    :return: placement of the extra monitor on the same chip
    :raises ValueError: if no extra monitor shares the chip
        (ValueError subclasses Exception, so existing broad handlers
        still work)
    """
    for vertex in monitor_vertices.values():
        vtx_plt = sim.placements().get_placement_of_vertex(vertex)
        if vtx_plt.x == placement.x and vtx_plt.y == placement.y:
            return vtx_plt
    # a bare Exception here prevented callers from handling this case
    # specifically; raise a precise, still-backward-compatible type
    raise ValueError("no extra monitor on same chip as {}".format(placement))
# Example #3
def _get_gatherer_for_monitor(monitor):
    """ Find the data-speed-up gatherer responsible for a monitor vertex.

    :param monitor: the vertex whose chip determines the gatherer
    :return: tuple of (mapping of all gatherers, the gatherer on the
        monitor's nearest Ethernet chip)
    """
    monitor_placement = sim.placements().get_placement_of_vertex(monitor)
    monitor_chip = sim.machine().get_chip_at(
        monitor_placement.x, monitor_placement.y)
    simulator = sim.globals_variables.get_simulator()
    # pylint: disable=protected-access
    gatherer_map = simulator._last_run_outputs[_GATHERER_MAP]
    ethernet_coords = (monitor_chip.nearest_ethernet_x,
                       monitor_chip.nearest_ethernet_y)
    return gatherer_map, gatherer_map[ethernet_coords]
# Example #4
    def run(self, mbs):
        """ Time extraction of ``mbs`` megabytes of SDRAM data through the
            extra-monitor / gatherer mechanism and verify the payload.

            :param mbs: megabytes written by the SDRAM writer vertex
            :raises ValueError: if no extra monitor shares the writer's chip
        """

        # setup system
        sim.setup(model_binary_module=test_extra_monitor_core_data_extraction,
                  n_chips_required=2)

        # build verts
        writer = SDRAMWriter(mbs)

        # add verts to graph
        sim.add_machine_vertex_instance(writer)

        sim.run(12)

        # get placements for extraction
        placements = sim.placements()
        machine = sim.machine()

        writer_placement = placements.get_placement_of_vertex(writer)
        writer_chip = \
            machine.get_chip_at(writer_placement.x, writer_placement.y)
        writer_nearest_ethernet = machine.get_chip_at(
            writer_chip.nearest_ethernet_x, writer_chip.nearest_ethernet_y)

        # pylint: disable=protected-access
        extra_monitor_vertices = sim.globals_variables.\
            get_simulator()._last_run_outputs['MemoryExtraMonitorVertices']
        extra_monitor_gatherers = sim.globals_variables.\
            get_simulator()._last_run_outputs[
                'MemoryMCGatherVertexToEthernetConnectedChipMapping']

        # the gatherer serves the writer's nearest Ethernet chip; the
        # receiver is the extra monitor on the writer's own chip
        gatherer = extra_monitor_gatherers[(writer_nearest_ethernet.x,
                                            writer_nearest_ethernet.y)]
        receiver = None
        for vertex in extra_monitor_vertices:
            placement = placements.get_placement_of_vertex(vertex)
            if (placement.x == writer_placement.x
                    and placement.y == writer_placement.y):
                receiver = vertex
                break
        if receiver is None:
            # previously this fell through with receiver == None and
            # crashed inside get_placement_of_vertex with an opaque error
            raise ValueError(
                "no extra monitor vertex found on chip ({}, {})".format(
                    writer_placement.x, writer_placement.y))

        start = float(time.time())

        gatherer.set_cores_for_data_extraction(sim.transceiver(),
                                               extra_monitor_vertices,
                                               placements)
        try:
            data = gatherer.get_data(
                sim.transceiver(),
                placements.get_placement_of_vertex(receiver),
                self._get_data_region_address(
                    sim.transceiver(), writer_placement),
                writer.mbs_in_bytes)
        finally:
            # always restore normal routing, even if extraction fails;
            # previously a failure left the machine in extraction mode
            gatherer.unset_cores_for_data_extraction(sim.transceiver(),
                                                     extra_monitor_vertices,
                                                     placements)
        end = float(time.time())

        print("time taken to extract {} MB is {}. MBS of {}".format(
            mbs, end - start, (mbs * 8) / (end - start)))

        self._check_data(data)
# Example #5
    def show_test_results (self):
        """ show stage test results corresponding to
            (C struct) test_results in mlp_types.h:

            typedef struct test_results {
            uint epochs_trained;
            uint examples_tested;
            uint ticks_tested;
            uint examples_correct;
            } test_results_t;

            pack: standard sizes, little-endian byte order,
            explicit padding
        """
        # nothing to show if recording of test results was not enabled
        if not self.rec_test_results:
            print ("\n--------------------------------------------------")
            print ("warning: test results not recorded")
            print ("--------------------------------------------------\n")
            return

        # only report results if the stage ran to completion
        if not self._aborted:
            # prepare to retrieve recorded test results data
            # four consecutive little-endian uint32 values, matching
            # test_results_t above
            TEST_RESULTS_FORMAT = "<4I"
            TEST_RESULTS_SIZE = struct.calcsize(TEST_RESULTS_FORMAT)

            # retrieve recorded test results from last output subgroup
            g = self.out_grps[-1]
            ltv = g.t_vertex[g.subgroups - 1]
            try:
                rec_test_results = ltv.read (
                    gfe.placements().get_placement_of_vertex (ltv),
                    gfe.buffer_manager(), MLPConstSizeRecordings.TEST_RESULTS.value
                    )
            except Exception as err:
                print ("\n--------------------------------------------------")
                print (f"error: test results aborted - {err}")
                print ("--------------------------------------------------\n")
                return

            # unpack only if at least one full record was recorded
            if len (rec_test_results) >= TEST_RESULTS_SIZE:
                (epochs_trained, examples_tested, ticks_tested, examples_correct) = \
                    struct.unpack_from(TEST_RESULTS_FORMAT, rec_test_results, 0)

                print ("\n--------------------------------------------------")
                print ("stage {} Test results: {}, {}, {}, {}".format(
                    self._stage_id, epochs_trained, examples_tested,
                    ticks_tested, examples_correct
                    ))
                print ("--------------------------------------------------\n")

                # optionally append the results as a CSV line
                if self._results_file is not None:
                    with open(self._results_file, 'a') as f:
                        f.write("{},{},{},{}\n".format(
                            epochs_trained, examples_tested, ticks_tested,
                            examples_correct
                            ))
# Example #6
    def _do_run(reader, receiver, mbs):
        """ Run the simulation, extract data through the speed-up receiver,
            time the transfer and sanity-check the returned payload.

            NOTE(review): no ``self`` parameter - presumably declared as a
            @staticmethod on the enclosing class; confirm.

            :param reader: vertex whose recorded data region is read
            :param receiver: gatherer vertex that performs the extraction
            :param mbs: payload size in megabytes (used for speed reporting)
            :return: tuple of (speed in Mb/s or None, ran flag, blew-up
                flag, error message, lost sequence data)
        """

        # run forever (to allow better speed testing)
        sim.run()

        # get placements for extraction
        placements = sim.placements()
        # disable the watchdog so the long transfer is not killed
        sim.transceiver().set_watch_dog(False)

        # set time outs using new interface (
        # clunky, but will be hidden in interface at later date)
        extra_monitor_vertices = sim.globals_variables. \
            get_simulator()._last_run_outputs['MemoryExtraMonitorVertices']

        try:
            print("starting data gathering")
            start = float(time.time())

            data, lost_seq_data = receiver.get_data(
                sim.transceiver(),
                placements.get_placement_of_vertex(reader),
                extra_monitor_vertices, placements)
            end = float(time.time())
            # end sim
            sim.stop()

            # check data is correct here
            # payload should be consecutive little-endian uint32 values;
            # report every place where the sequence jumps
            ints = struct.unpack("<{}I".format(len(data) // 4), data)
            start_value = 0
            for value in ints:
                if value != start_value:
                    print("should be getting {}, but got {}".format(
                        start_value, value))
                    # resynchronise on the value actually seen
                    start_value = value + 1
                else:
                    start_value += 1

            # print data
            seconds = float(end - start)
            speed = (mbs * 8) / seconds
            print("Read {} MB in {} seconds ({} Mb/s)".format(
                mbs, seconds, speed))
            del data
            return speed, True, False, "", lost_seq_data

        except Exception as e:
            # if boomed. end so that we can get iobuf
            traceback.print_exc()
            sim.stop()
            return None, True, True, str(e), 0
def _do_transfer(gatherer, gatherers, monitor_vertices, receiver_placement,
                 writer_placement, writer_vertex):
    """ Stream the writer's SDRAM contents back through a gatherer.

    :param .DataSpeedUpPacketGatherMachineVertex gatherer: gatherer to use
    :param dict(tuple(int,int),.DataSpeedUpPacketGatherMachineVertex) \
            gatherers: all gatherers, keyed by Ethernet chip coordinates
    :param list(.ExtraMonitorSupportMachineVertex) monitor_vertices:
    :param .Placement receiver_placement: placement of the receiving monitor
    :param .Placement writer_placement: placement of the SDRAM writer
    :param SDRAMWriter writer_vertex: the vertex holding the data
    :rtype: bytearray
    """
    streaming = StreamingContextManager(
        gatherers.values(), sim.transceiver(), monitor_vertices,
        sim.placements())
    with streaming:
        region_address = get_data_region_address(
            sim.transceiver(), writer_placement, DataRegions.DATA)
        return gatherer.get_data(
            extra_monitor=receiver_placement.vertex,
            placement=receiver_placement,
            memory_address=region_address,
            length_in_bytes=writer_vertex.mbs_in_bytes,
            fixed_routes=None)
# Example #8
    def check_extra_monitor(self):
        """ End-to-end check of SDRAM data extraction through the extra
            monitor and data speed-up gatherer vertices.
        """
        mbs = _TRANSFER_SIZE_MEGABYTES

        # setup system
        # start from a clean slate in case a previous test left a simulator
        globals_variables.unset_simulator()
        sim.setup(model_binary_folder=os.path.dirname(__file__),
                  n_chips_required=2)

        # build verts
        writer_vertex = SDRAMWriter(mbs)

        # add verts to graph
        sim.add_machine_vertex_instance(writer_vertex)
        sim.run(12)

        writer_placement = sim.placements().get_placement_of_vertex(
            writer_vertex)

        # pylint: disable=protected-access
        outputs = sim.globals_variables.get_simulator()._last_run_outputs
        monitor_vertices = outputs[_MONITOR_VERTICES]

        # receiver: monitor on the writer's chip;
        # gatherer: on the writer's nearest Ethernet chip
        receiver_plt = _get_monitor_placement(monitor_vertices,
                                              writer_placement)
        gatherers, gatherer = _get_gatherer_for_monitor(writer_vertex)

        start = float(time.time())

        data = _do_transfer(gatherer, gatherers, monitor_vertices,
                            receiver_plt, writer_placement, writer_vertex)

        end = float(time.time())

        print(
            "time taken to extract {} MB is {}. Transfer rate: {} Mb/s".format(
                mbs, end - start, (mbs * 8) / (end - start)))

        check_data(data)

        sim.stop()
    def check_extra_monitor(self):
        """ End-to-end check of SDRAM data extraction through the extra
            monitor and data speed-up gatherer vertices.
        """
        mbs = _TRANSFER_SIZE_MEGABYTES

        # bring up a machine with room for two chips
        sim.setup(model_binary_folder=os.path.dirname(__file__),
                  n_chips_required=2)

        # create the vertex that fills SDRAM with known data and place it
        writer_vertex = SDRAMWriter(mbs)
        sim.add_machine_vertex_instance(writer_vertex)
        sim.run(12)

        writer_placement = sim.placements().get_placement_of_vertex(
            writer_vertex)

        # pylint: disable=protected-access
        simulator = sim.globals_variables.get_simulator()
        monitor_vertices = simulator._extra_monitor_to_chip_mapping

        # receiver: monitor on the writer's chip;
        # gatherer: on the writer's nearest Ethernet chip
        receiver_plt = _get_monitor_placement(
            monitor_vertices, writer_placement)
        gatherers, gatherer = _get_gatherer_for_monitor(writer_vertex)

        # time the streamed extraction
        start = float(time.time())
        data = _do_transfer(gatherer, gatherers, monitor_vertices,
                            receiver_plt, writer_placement, writer_vertex)
        end = float(time.time())

        print(f"time taken to extract {mbs} MB is {end - start}. "
              f"Transfer rate: {(mbs * 8) / (end - start)} Mb/s")

        check_data(data)

        sim.stop()
# Example #10
# module-level logger for this example script
logger = logging.getLogger(__name__)

front_end.setup(
    n_chips_required=None, model_binary_folder=os.path.dirname(__file__))

'''
calculate total number of 'free' cores for the given board
(i.e. does not include those busy with SARK or reinjection)'''
total_number_of_cores = \
    front_end.get_number_of_available_cores_on_machine()

#param1: data
#param2: number of chips used
#param3: what columns to use
#param4: how many string columns exist?
#param5: function id
# NOTE(review): raw_data and load_data_onto_vertices are defined earlier
# in the file - confirm against the full script
load_data_onto_vertices(raw_data, 1, [0], 1, 2)

front_end.run(10000)

placements = front_end.placements()
buffer_manager = front_end.buffer_manager()

# alternative reporting helpers, kept commented out for reference
#write_unique_ids_to_csv(getData,1,len(raw_data))
#display_linked_list_size()
#display_results_function_one()
#display_results_function_two()
display_results_function_three()
front_end.stop()
import spinnaker_graph_front_end as front_end
from gfe_examples.hello_world_untimed.hello_world_vertex import (
    HelloWorldVertex)

# NOTE(review): ``logging``, ``os`` and ``FormatAdapter`` are presumably
# imported earlier in the file - confirm
logger = FormatAdapter(logging.getLogger(__name__))

front_end.setup(n_chips_required=1,
                model_binary_folder=os.path.dirname(__file__))

# Put HelloWorldVertex onto 16 cores
total_number_of_cores = 16
prints_per_run = 10
runs = 2
for x in range(total_number_of_cores):
    front_end.add_machine_vertex_instance(
        HelloWorldVertex(label=f"Hello World {x}"))

# run repeatedly; each run lets every vertex print another batch
for _ in range(runs):
    front_end.run_until_complete(prints_per_run)

# read back and log what each core printed, ordered by placement;
# reading from cores is only possible on real hardware
if not front_end.use_virtual_machine():
    for placement in sorted(front_end.placements().placements,
                            key=lambda p: (p.x, p.y, p.p)):

        if isinstance(placement.vertex, HelloWorldVertex):
            hello_world = placement.vertex.read()
            logger.info("{}, {}, {} > {}", placement.x, placement.y,
                        placement.p, hello_world)

front_end.stop()
# Example #12
    def run(self, mbs, number_of_repeats):
        """ Repeatedly extract ``mbs`` megabytes from SDRAM writers placed
            on many different chips, timing every transfer.

            :param mbs: megabytes each writer holds
            :param number_of_repeats: how many extraction passes to run
                over all writers
        """

        # setup system
        sim.setup(model_binary_module=(
            test_extra_monitor_core_data_extraction_multiple_locations),
                  n_chips_required=49 * 2)

        # build vertices
        # chip coordinates spread across the machine so several Ethernet
        # chips are exercised
        locs = [(0, 0), (2, 2), (7, 7), (3, 0), (1, 0), (0, 1), (3, 3), (4, 4),
                (5, 5), (3, 5), (4, 0), (7, 4), (8, 4), (4, 8), (11, 11),
                (11, 0), (0, 11), (6, 3), (0, 6)]
        writers = list()

        for chip_x, chip_y in locs:
            writer = SDRAMWriter(mbs,
                                 constraint=ChipAndCoreConstraint(
                                     chip_x, chip_y))
            # add vertices to graph
            sim.add_machine_vertex_instance(writer)
            writers.append(writer)

        sim.run(12)

        # get placements for extraction
        placements = sim.placements()
        machine = sim.machine()

        # pylint: disable=protected-access
        extra_monitor_vertices = sim.globals_variables. \
            get_simulator()._last_run_outputs[
                'MemoryExtraMonitorVertices']
        extra_monitor_gatherers = sim.globals_variables. \
            get_simulator()._last_run_outputs[
                'MemoryMCGatherVertexToEthernetConnectedChipMapping']
        # the gatherer on the root chip switches the whole machine in and
        # out of data-extraction mode once for all transfers
        time_out_setter = extra_monitor_gatherers[(0, 0)]

        time_out_setter.set_cores_for_data_extraction(sim.transceiver(),
                                                      extra_monitor_vertices,
                                                      placements)

        for _ in range(0, number_of_repeats):
            for writer in writers:

                writer_placement = placements.get_placement_of_vertex(writer)
                writer_chip = \
                    machine.get_chip_at(writer_placement.x, writer_placement.y)
                writer_nearest_ethernet = machine.get_chip_at(
                    writer_chip.nearest_ethernet_x,
                    writer_chip.nearest_ethernet_y)

                # find the extra monitor placed on the writer's own chip
                # NOTE(review): receiver stays None if no monitor shares
                # the chip, which would crash get_placement_of_vertex
                # below - confirm a monitor always exists per chip
                receiver = None
                gatherer = extra_monitor_gatherers[(writer_nearest_ethernet.x,
                                                    writer_nearest_ethernet.y)]
                for vertex in extra_monitor_vertices:
                    placement = placements.get_placement_of_vertex(vertex)
                    if (placement.x == writer_placement.x
                            and placement.y == writer_placement.y):
                        receiver = vertex

                start = float(time.time())
                data = gatherer.get_data(
                    sim.transceiver(),
                    placements.get_placement_of_vertex(receiver),
                    self._get_data_region_address(sim.transceiver(),
                                                  writer_placement),
                    writer.mbs_in_bytes)
                end = float(time.time())

                print("time taken to extract {} MB is {}. MBS of {}".format(
                    mbs, end - start, (mbs * 8) / (end - start)))

                self._check_data(data)

        time_out_setter.unset_cores_for_data_extraction(
            sim.transceiver(), extra_monitor_vertices, placements)
# Example #13
# payload size in megabytes for the solo speed test
mbs = 1.0

# setup system
sim.setup(model_binary_module=speed_test_solo)

# build verts
receiver = PacketGathererCheat(mbs, 1)

# add verts to graph
sim.add_machine_vertex_instance(receiver)

# run forever (to allow better speed testing)
sim.run()

# get placements for extraction
placements = sim.placements()

# try getting data via mc transmission
start = None
end = None
data = None

# disable the watchdog so the long-running transfer is not killed
sim.transceiver().set_watch_dog(False)
try:
    print("starting data gathering")
    start = float(time.time())
    data = receiver.get_data(
        sim.transceiver(),
        placements.get_placement_of_vertex(receiver))
    end = float(time.time())
                MachineEdge(vertices[x][y],
                            vertices[dest_x][dest_y],
                            label=compass), ConwayBasicCell.PARTITION_ID)

# run the simulation
front_end.run(runtime)

# get recorded data
recorded_data = dict()

# get the data per vertex
for x in range(0, MAX_X_SIZE_OF_FABRIC):
    for y in range(0, MAX_Y_SIZE_OF_FABRIC):
        recorded_data[(x, y)] = vertices[x][y].get_data(
            front_end.buffer_manager(),
            front_end.placements().get_placement_of_vertex(vertices[x][y]))

# visualise it in text form (bad but no vis this time)
# fix: the original used Python 2 print statements, which are syntax
# errors under Python 3 (the rest of this file uses f-strings); also use
# ``tick`` rather than shadowing the ``time`` module
for tick in range(0, runtime):
    print("at time {}".format(tick))
    output = ""
    # NOTE(review): the range bounds use MAX_X for y and MAX_Y for x -
    # looks swapped, but is harmless on a square fabric; confirm intent
    for y in range(MAX_X_SIZE_OF_FABRIC - 1, 0, -1):
        for x in range(0, MAX_Y_SIZE_OF_FABRIC):
            if recorded_data[(x, y)][tick]:
                output += "X"
            else:
                output += " "
        output += "\n"
    print(output)
    print("\n\n")
# Example #15
def placements():
    """ Convenience wrapper around the front end's placements accessor. """
    current_placements = front_end.placements()
    return current_placements
# Example #16
    def write_Lens_output_file (self,
                                output_file
                                ):
        """ writes a Lens-style output file

            Lens online manual @ CMU:
                https://ni.cmu.edu/~plaut/Lens/Manual/

            File format:

            for each example:
              <I total-updates> <I example-number>
              <I ticks-on-example> <I num-groups>
              for each tick on the example:
                <I tick-number> <I event-number>
            for each WRITE_OUTPUTS group:
                  <I num-units> <B targets?>
              for each unit:
                    <R output-value> <R target-value>

            collects recorded tick data corresponding to
            (C struct) tick_record in mlp_types.h:

            typedef struct tick_record {
              uint epoch;    // current epoch
              uint example;  // current example
              uint event;    // current event
              uint tick;     // current tick
            } tick_record_t;

            collects recorded output data corresponding to
            (C type) short_activ_t in mlp_types.h:

            typedef short short_activ_t;

            pack: standard sizes, little-endian byte order,
            explicit padding
        """
        # outputs must have been recorded during the run
        if not self._rec_data_rdy:
            print ("\n--------------------------------------------------")
            print ("warning: file write aborted - outputs not available")
            print ("--------------------------------------------------\n")
            return

        # only write a file if the stage ran to completion
        if not self._aborted:
            with open(output_file, 'w') as f:

                # retrieve recorded tick_data from first output subgroup
                g = self.out_grps[0]
                ftv = g.t_vertex[0]
                try:
                    rec_tick_data = ftv.read (
                        gfe.placements().get_placement_of_vertex (ftv),
                        gfe.buffer_manager(), MLPExtraRecordings.TICK_DATA.value
                        )
                except Exception as err:
                    print ("\n--------------------------------------------------")
                    print (f"error: write output file aborted - {err}")
                    print ("--------------------------------------------------\n")
                    return

                # retrieve recorded outputs from every output group
                # rec_outputs is indexed by each group's write block
                rec_outputs = [None] * len (self.out_grps)
                for g in self.out_grps:
                    rec_outputs[g.write_blk] = []
                    # append all subgroups together
                    for s in range (g.subgroups):
                        gtv = g.t_vertex[s]
                        try:
                            rec_outputs[g.write_blk].append (gtv.read (
                                gfe.placements().get_placement_of_vertex (gtv),
                                gfe.buffer_manager(),
                                MLPVarSizeRecordings.OUTPUTS.value)
                                )
                        except Exception as err:
                            print ("\n--------------------------------------------------")
                            print (f"error: write output file aborted - {err}")
                            print ("--------------------------------------------------\n")
                            return

                # compute total ticks in first example
                #TODO: need to get actual value from simulation, not max value
                ticks_per_example = 0
                for ev in self._ex_set.examples[0].events:
                    # use event max_time if available or default to set max_time,
                    #NOTE: check for absent or NaN (x != x is the NaN test)
                    if (ev.max_time is None) or (ev.max_time != ev.max_time):
                        max_time = int (self._ex_set.max_time)
                    else:
                        max_time = int (ev.max_time)

                    # compute number of ticks for max time,
                    ticks_per_example += (max_time + 1) * self._ticks_per_interval

                    # and limit to the global maximum if required
                    if ticks_per_example > self.global_max_ticks:
                        ticks_per_example = self.global_max_ticks

                # prepare to retrieve recorded data
                # four little-endian uint32 fields, matching tick_record_t
                TICK_DATA_FORMAT = "<4I"
                TICK_DATA_SIZE = struct.calcsize(TICK_DATA_FORMAT)

                TOTAL_TICKS = len (rec_tick_data) // TICK_DATA_SIZE

                # print recorded data in correct order
                current_epoch = -1
                for tk in range (TOTAL_TICKS):
                    (epoch, example, event, tick) = struct.unpack_from(
                        TICK_DATA_FORMAT,
                        rec_tick_data,
                        tk * TICK_DATA_SIZE
                        )

                    # check if starting new epoch
                    if (epoch != current_epoch):
                        current_epoch = epoch
                        current_example = -1

                    # check if starting new example
                    if (example != current_example):
                        # print example header
                        f.write (f"{epoch} {example}\n")
                        f.write (f"{ticks_per_example} {len (self.out_grps)}\n")

                        # include initial outputs if recording all ticks
                        if not self.rec_example_last_tick_only:
                            # print first (implicit) tick data
                            f.write ("0 -1\n")
                            for g in self.output_chain:
                                f.write (f"{g.units} 1\n")
                                for _ in range (g.units):
                                    f.write ("{:8.6f} {}\n".format (0, 0))

                        # compute event index
                        # (total events in all preceding examples)
                        evt_inx = 0
                        for ex in range (example):
                            evt_inx += len (self._ex_set.examples[ex].events)

                        # and prepare for the next example
                        current_example = example

                    # compute index into target array
                    tgt_inx = evt_inx + event

                    # print current tick data
                    f.write (f"{tick} {event}\n")

                    for g in self.output_chain:
                        outputs = []
                        # get tick outputs for each subgroup
                        for sg, rec_outs in enumerate (rec_outputs[g.write_blk]):
                            outputs += struct.unpack_from (
                                f"<{g.subunits[sg]}H",
                                rec_outs,
                                tk * struct.calcsize(f"<{g.subunits[sg]}H")
                                )

                        # print outputs
                        f.write (f"{g.units} 1\n")
                        tinx = tgt_inx * g.units
                        for u in range (g.units):
                            # outputs are s16.15 fixed-point numbers
                            out = (1.0 * outputs[u]) / (1.0 * (1 << 15))
                            t = g.targets[tinx + u]
                            #NOTE: check for absent or NaN (t != t)
                            if (t is None) or (t != t):
                                tgt = "-"
                            else:
                                tgt = int (t)
                            f.write ("{:8.6f} {}\n".format (out, tgt))

        # recorded data no longer available
        self._rec_data_rdy = False