Example #1
def _plan_expansion(app_graph, placements, synapse_expander_bin,
                    delay_expander_bin, progress):
    expander_cores = ExecutableTargets()
    expanded_pop_vertices = list()

    for vertex in progress.over(app_graph.vertices, finish_at_end=False):
        # Add all machine vertices of the population vertex to the set
        # that needs synapse expansion
        if isinstance(vertex, AbstractPopulationVertex):
            gen_on_machine = False
            for m_vertex in vertex.machine_vertices:
                if vertex.gen_on_machine(m_vertex.vertex_slice):
                    placement = placements.get_placement_of_vertex(m_vertex)
                    expander_cores.add_processor(
                        synapse_expander_bin,
                        placement.x,
                        placement.y,
                        placement.p,
                        executable_type=ExecutableType.SYSTEM)
                    gen_on_machine = True
            if gen_on_machine:
                expanded_pop_vertices.append(vertex)
        elif isinstance(vertex, DelayExtensionVertex):
            for m_vertex in vertex.machine_vertices:
                if vertex.gen_on_machine(m_vertex.vertex_slice):
                    placement = placements.get_placement_of_vertex(m_vertex)
                    expander_cores.add_processor(
                        delay_expander_bin,
                        placement.x,
                        placement.y,
                        placement.p,
                        executable_type=ExecutableType.SYSTEM)

    return expander_cores, expanded_pop_vertices
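The `ExecutableTargets` object built up here is, in essence, a map from binary path to the set of (x, y, p) cores that should run it. A minimal plain-Python stand-in (illustrative only, not the real spinnman/front-end class; the binary names are just example values) shows what the planning loop above accumulates:

from collections import defaultdict

class ToyExecutableTargets:
    """ Illustrative stand-in: binary path -> set of (x, y, p) cores. """

    def __init__(self):
        self._targets = defaultdict(set)

    def add_processor(self, binary, x, y, p, executable_type=None):
        # The real class also tracks core subsets per chip and the
        # executable type of each binary; here only the cores are kept.
        self._targets[binary].add((x, y, p))

    def get_cores_for_binary(self, binary):
        return self._targets[binary]

targets = ToyExecutableTargets()
targets.add_processor("synapse_expander.aplx", 0, 0, 1)
targets.add_processor("synapse_expander.aplx", 0, 0, 2)
targets.add_processor("delay_expander.aplx", 0, 1, 1)
print(sorted(targets.get_cores_for_binary("synapse_expander.aplx")))
# [(0, 0, 1), (0, 0, 2)]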
Example #2
    def _locate_synaptic_expander_cores(cores, executable_finder, placements,
                                        machine):
        """ removes host based cores for synaptic matrix regeneration

        :param cores: the cores for everything
        :param executable_finder: way to get binary path
        :param machine: spiNNMachine instance.
        :return: new targets for synaptic expander
        """
        new_cores = ExecutableTargets()

        # locate expander executable path
        expander_executable_path = executable_finder.get_executable_path(
            SYNAPSE_EXPANDER)

        # if any cores are going to be run on host, leave them out of the
        # new core setup
        for core_subset in cores.all_core_subsets:
            chip = machine.get_chip_at(core_subset.x, core_subset.y)
            for processor_id in range(chip.n_processors):
                if placements.is_processor_occupied(core_subset.x,
                                                    core_subset.y,
                                                    processor_id):
                    vertex = placements.get_vertex_on_processor(
                        core_subset.x, core_subset.y, processor_id)
                    app_vertex = vertex.app_vertex
                    if isinstance(vertex, AbstractSupportsBitFieldGeneration):
                        if app_vertex.gen_on_machine(vertex.vertex_slice):
                            new_cores.add_processor(
                                expander_executable_path,
                                core_subset.x,
                                core_subset.y,
                                processor_id,
                                executable_type=ExecutableType.SYSTEM)
        return new_cores
def _locate_expander_rerun_targets(
        bitfield_targets, executable_finder, placements,
        transceiver):
    """ removes host based cores for synaptic matrix regeneration

    :param ~.ExecutableTargets bitfield_targets: the cores that were used
    :param ~.ExecutableFinder executable_finder: way to get binary path
    :param ~.Placements placements: placements on machine
    :param ~.Transceiver transceiver: spinnman instance
    :return: new targets for synaptic expander
    :rtype: ~.ExecutableTargets
    """

    # locate expander executable path
    expander_executable_path = executable_finder.get_executable_path(
        SYNAPSE_EXPANDER_APLX)

    # if any cores are going to be run on host, leave them out of the new
    # core setup
    new_cores = ExecutableTargets()
    for placement in __machine_expandables(
            bitfield_targets.all_core_subsets, placements):
        new_cores.add_processor(
            expander_executable_path,
            placement.x, placement.y, placement.p,
            executable_type=ExecutableType.SYSTEM)
        # Write the region's address to USER1, as that is the best we can do
        write_address_to_user1(
            transceiver, placement.x, placement.y, placement.p,
            placement.vertex.connection_generator_region)
    return new_cores
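The `__machine_expandables` helper used above is not shown on this page. A plausible sketch, assuming `Placements` is iterable (as in Example #8) and that a `CoreSubsets.is_core(x, y, p)` membership test is available, would filter the recorded cores down to the placements whose vertex asked for on-machine generation:

def __machine_expandables(core_subsets, placements):
    """ Sketch only: yield the placements, among the given core subsets,
        whose vertices support on-machine synapse expansion.
    """
    for placement in placements:
        vertex = placement.vertex
        # is_core(x, y, p) is assumed to report membership in the subsets
        if (core_subsets.is_core(placement.x, placement.y, placement.p)
                and isinstance(vertex, AbstractSynapseExpandable)
                and vertex.gen_on_machine()):
            yield placement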
Example #4
def synapse_expander(
        app_graph, graph_mapper, placements, transceiver,
        provenance_file_path, executable_finder):
    """ Run the synapse expander - needs to be done after data has been loaded
    """

    synapse_expander = executable_finder.get_executable_path(SYNAPSE_EXPANDER)
    delay_expander = executable_finder.get_executable_path(DELAY_EXPANDER)

    progress = ProgressBar(len(app_graph.vertices) + 2, "Expanding Synapses")

    # Find the places where the synapse expander and delay expander
    # should run
    expander_cores = ExecutableTargets()
    for vertex in progress.over(app_graph.vertices, finish_at_end=False):

        # Find population vertices
        if isinstance(
                vertex, (AbstractPopulationVertex, DelayExtensionVertex)):

            # Add all machine vertices of the population vertex to the set
            # that needs synapse expansion
            for m_vertex in graph_mapper.get_machine_vertices(vertex):
                vertex_slice = graph_mapper.get_slice(m_vertex)
                if vertex.gen_on_machine(vertex_slice):
                    placement = placements.get_placement_of_vertex(m_vertex)
                    if isinstance(vertex, AbstractPopulationVertex):
                        binary = synapse_expander
                    else:
                        binary = delay_expander
                    expander_cores.add_processor(
                        binary, placement.x, placement.y, placement.p)

    # Launch the expanders
    expander_app_id = transceiver.app_id_tracker.get_new_id()
    transceiver.execute_application(expander_cores, expander_app_id)
    progress.update()

    # Wait for everything to finish
    finished = False
    try:
        transceiver.wait_for_cores_to_be_in_state(
            expander_cores.all_core_subsets, expander_app_id,
            [CPUState.FINISHED])
        progress.update()
        finished = True
        _extract_iobuf(expander_cores, transceiver, provenance_file_path)
        progress.end()
    except Exception:
        logger.exception("Synapse expander has failed")
        _handle_failure(
            expander_cores, transceiver, provenance_file_path)
    finally:
        transceiver.stop_application(expander_app_id)
        transceiver.app_id_tracker.free_id(expander_app_id)

        if not finished:
            raise SpynnakerException(
                "The synapse expander failed to complete")
    def add_processor(self, binary, chip_x, chip_y, chip_p,
                      executable_type=None):
        SuperExecTargets.add_processor(self, binary, chip_x, chip_y, chip_p)
        if executable_type is not None:
            self._binary_type_map[executable_type].add(binary)
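The override only adds bookkeeping on top of the inherited `add_processor`: binaries are grouped by their `ExecutableType`. A small self-contained sketch of that `_binary_type_map` behaviour, using plain strings in place of the real enum:

from collections import defaultdict

# executable type -> set of binaries of that type (illustrative values)
binary_type_map = defaultdict(set)

def record_binary(binary, executable_type=None):
    # As in the override above, the map is only touched when a type is given
    if executable_type is not None:
        binary_type_map[executable_type].add(binary)

record_binary("synapse_expander.aplx", "SYSTEM")
record_binary("delay_expander.aplx", "SYSTEM")
record_binary("neuron.aplx")   # no type supplied, so left unmapped
print(dict(binary_type_map))
# e.g. {'SYSTEM': {'synapse_expander.aplx', 'delay_expander.aplx'}}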
class GraphBinaryGatherer(object):
    """ Extracts binaries to be executed.

    :param ~pacman.model.placements.Placements placements:
    :param ~pacman.model.graphs.machine.MachineGraph graph:
    :param ~spinn_utilities.executable_finder.ExecutableFinder \
            executable_finder:
    :rtype: ExecutableTargets
    """

    __slots__ = ["_exe_finder", "_exe_targets"]

    def __init__(self):
        self._exe_finder = None
        self._exe_targets = None

    def __call__(self, placements, graph, executable_finder):
        """
        :param ~.Placements placements:
        :param ~.MachineGraph graph:
        :param ExecutableFinder executable_finder:
        :rtype: ExecutableTargets
        """
        self._exe_finder = executable_finder
        self._exe_targets = ExecutableTargets()
        progress = ProgressBar(graph.n_vertices, "Finding binaries")
        for vertex in progress.over(graph.vertices):
            placement = placements.get_placement_of_vertex(vertex)
            self.__get_binary(placement, vertex)

        return self._exe_targets

    def __get_binary(self, placement, vertex):
        """
        :param ~.Placement placement:
        :param ~.AbstractVertex vertex:
        """
        # if the vertex cannot be executed, ignore it
        if not isinstance(vertex, AbstractHasAssociatedBinary):
            return

        # Get name of binary from vertex
        binary_name = vertex.get_binary_file_name()
        exec_type = vertex.get_binary_start_type()

        # Attempt to find this within search paths
        binary_path = self._exe_finder.get_executable_path(binary_name)
        if binary_path is None:
            raise ExecutableNotFoundException(binary_name)

        self._exe_targets.add_processor(binary_path, placement.x, placement.y,
                                        placement.p, exec_type)
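The gather step itself needs nothing SpiNNaker-specific: each vertex exposes a binary name, the finder turns the name into a path, and vertices without an associated binary are skipped. A self-contained sketch of that flow with stub types (none of these names are from the real libraries):

import os

class StubVertex:
    """ Stand-in for a vertex with an associated binary. """

    def __init__(self, binary_name):
        self._binary_name = binary_name

    def get_binary_file_name(self):
        return self._binary_name

def gather_binaries(placed_vertices, search_paths):
    """ Map each placed vertex to a resolved binary path (sketch only).

    :param placed_vertices: iterable of ((x, y, p), vertex) pairs
    :param search_paths: directories to search for binaries
    :return: dict of binary path -> list of (x, y, p) cores
    """
    targets = {}
    for core, vertex in placed_vertices:
        if not hasattr(vertex, "get_binary_file_name"):
            continue  # the vertex cannot be executed, ignore it
        name = vertex.get_binary_file_name()
        for directory in search_paths:
            candidate = os.path.join(directory, name)
            if os.path.isfile(candidate):
                targets.setdefault(candidate, []).append(core)
                break
        else:
            raise FileNotFoundError(name)
    return targets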
Example #8
def _plan_expansion(placements, synapse_expander_bin, delay_expander_bin,
                    transceiver):
    """ Plan the expansion of synapses and set up the regions using USER1

    :param ~pacman.model.placements.Placements placements:
        The placements of the vertices
    :param str synapse_expander_bin: The binary name of the synapse expander
    :param str delay_expander_bin: The binary name of the delay expander
    :param ~spinnman.transceiver.Transceiver transceiver:
        How to talk to the machine
    :return: The places to load the synapse expander and delay expander
        executables, and the target machine vertices to read synapses back from
    :rtype: tuple(ExecutableTargets, list(tuple(MachineVertex, Placement)))
    """
    expander_cores = ExecutableTargets()
    expanded_pop_vertices = list()

    progress = ProgressBar(len(placements), "Preparing to Expand Synapses")
    for placement in progress.over(placements):
        # Add the placement if its vertex needs on-machine synapse expansion
        vertex = placement.vertex
        if isinstance(vertex, AbstractSynapseExpandable):
            if vertex.gen_on_machine():
                expander_cores.add_processor(
                    synapse_expander_bin,
                    placement.x,
                    placement.y,
                    placement.p,
                    executable_type=ExecutableType.SYSTEM)
                expanded_pop_vertices.append((vertex, placement))
                # Write the region's address to USER1, as that is the best
                # we can do
                write_address_to_user1(transceiver, placement.x, placement.y,
                                       placement.p,
                                       vertex.connection_generator_region)

        elif isinstance(vertex, DelayExtensionMachineVertex):
            if vertex.gen_on_machine():
                expander_cores.add_processor(
                    delay_expander_bin,
                    placement.x,
                    placement.y,
                    placement.p,
                    executable_type=ExecutableType.SYSTEM)

    return expander_cores, expanded_pop_vertices
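Once planned, the targets are launched and waited on much as in Example #4. The driver below is a sketch of that step under the same assumptions (a connected board, plus the SYNAPSE_EXPANDER/DELAY_EXPANDER binary names and CPUState used elsewhere on this page), not the library's actual run phase:

def run_expansion(placements, transceiver, executable_finder):
    # Resolve the expander binaries and plan which cores should run them
    synapse_bin = executable_finder.get_executable_path(SYNAPSE_EXPANDER)
    delay_bin = executable_finder.get_executable_path(DELAY_EXPANDER)
    expander_cores, expanded_pop_vertices = _plan_expansion(
        placements, synapse_bin, delay_bin, transceiver)

    # Launch, wait for the cores to finish, then release the application ID
    expander_app_id = transceiver.app_id_tracker.get_new_id()
    try:
        transceiver.execute_application(expander_cores, expander_app_id)
        transceiver.wait_for_cores_to_be_in_state(
            expander_cores.all_core_subsets, expander_app_id,
            [CPUState.FINISHED])
    finally:
        transceiver.stop_application(expander_app_id)
        transceiver.app_id_tracker.free_id(expander_app_id)
    return expanded_pop_vertices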
Example #9
    def test_front_end_common_load_executable_images(self):
        transceiver = _MockTransceiver(self)
        loader = LoadExecutableImages()
        targets = ExecutableTargets()
        targets.add_processor("test.aplx", 0, 0, 0)
        targets.add_processor("test.aplx", 0, 0, 1)
        targets.add_processor("test.aplx", 0, 0, 2)
        targets.add_processor("test2.aplx", 0, 1, 0)
        targets.add_processor("test2.aplx", 0, 1, 1)
        targets.add_processor("test2.aplx", 0, 1, 2)
        loader(targets, 30, transceiver, True)
Example #10
    def test_front_end_common_load_executable_images(self):
        transceiver = _MockTransceiver(self)
        loader = LoadExecutableImages()
        targets = ExecutableTargets()
        targets.add_processor("test.aplx", 0, 0, 0, SIM)
        targets.add_processor("test.aplx", 0, 0, 1, SIM)
        targets.add_processor("test.aplx", 0, 0, 2, SIM)
        targets.add_processor("test2.aplx", 0, 1, 0, SIM)
        targets.add_processor("test2.aplx", 0, 1, 1, SIM)
        targets.add_processor("test2.aplx", 0, 1, 2, SIM)
        loader.load_app_images(targets, 30, transceiver)
    def test_call(self):
        executor = HostExecuteDataSpecification()
        transceiver = _MockTransceiver(user_0_addresses={0: 1000})
        machine = virtual_machine(2, 2)
        tempdir = tempfile.mkdtemp()

        dsg_targets = DataSpecificationTargets(machine, tempdir)
        with dsg_targets.create_data_spec(0, 0, 0) as spec_writer:
            spec = DataSpecificationGenerator(spec_writer)
            spec.reserve_memory_region(0, 100)
            spec.reserve_memory_region(1, 100, empty=True)
            spec.reserve_memory_region(2, 100)
            spec.switch_write_focus(0)
            spec.write_value(0)
            spec.write_value(1)
            spec.write_value(2)
            spec.switch_write_focus(2)
            spec.write_value(3)
            spec.end_specification()

        region_sizes = dict()
        region_sizes[0, 0, 0] = (
            APP_PTR_TABLE_BYTE_SIZE + sum(spec.region_sizes))

        # Execute the spec
        targets = ExecutableTargets()
        targets.add_processor("text.aplx", 0, 0, 0,
                              ExecutableType.USES_SIMULATION_INTERFACE)
        infos = executor.execute_application_data_specs(
            transceiver,
            machine,
            30,
            dsg_targets,
            False,
            targets,
            report_folder=tempdir,
            region_sizes=region_sizes)

        # Test regions - although 3 are created, only 2 should be uploaded
        # (0 and 2), and only the data written should be uploaded
        # The space between regions should be as allocated regardless of
        # how much data is written
        header_and_table_size = (MAX_MEM_REGIONS + 2) * BYTES_PER_WORD
        regions = transceiver.regions_written
        self.assertEqual(len(regions), 4)

        # Base address for header and table
        self.assertEqual(regions[1][0], 0)

        # Base address for region 0 (after header and table)
        self.assertEqual(regions[2][0], header_and_table_size)

        # Base address for region 2
        self.assertEqual(regions[3][0], header_and_table_size + 200)

        # User 0 write address
        self.assertEqual(regions[0][0], 1000)

        # Size of header and table
        self.assertEqual(len(regions[1][1]), header_and_table_size)

        # Size of region 0
        self.assertEqual(len(regions[2][1]), 12)

        # Size of region 2
        self.assertEqual(len(regions[3][1]), 4)

        # Size of user 0
        self.assertEqual(len(regions[0][1]), 4)

        info = infos[(0, 0, 0)]
        self.assertEqual(info.memory_used, 372)
        self.assertEqual(info.memory_written, 88)
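The asserted numbers follow directly from the region layout, assuming the usual values of the data-specification constants (MAX_MEM_REGIONS = 16, BYTES_PER_WORD = 4). The arithmetic below is just a check of that reasoning, not part of the test:

MAX_MEM_REGIONS = 16    # assumed value of the constant
BYTES_PER_WORD = 4      # assumed word size in bytes

header_and_table = (MAX_MEM_REGIONS + 2) * BYTES_PER_WORD  # 72 bytes
memory_used = header_and_table + 100 + 100 + 100           # 372: reserved
memory_written = header_and_table + 12 + 4                 # 88: table + data
region_0_base = header_and_table                           # 72
region_2_base = header_and_table + 100 + 100               # 272 = 72 + 200

assert (memory_used, memory_written) == (372, 88)
assert region_2_base == region_0_base + 200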