Example #1
0
    def __call__(self, placements, file_path):
        """ Convert memory placements to a placements.json file.

        :param placements: the memory placements object
        :param file_path: the file path for the placements.json
        :return: file path for the placements.json and a map from vertex
            identifier back to vertex
        """
        progress = ProgressBar(placements.n_placements + 1,
                               "converting to JSON placements")

        # Build the JSON object and the id -> vertex lookup in one pass
        json_obj = {}
        vertex_by_id = {}
        for placement in progress.over(placements, False):
            vid = ident(placement.vertex)
            vertex_by_id[vid] = placement.vertex
            json_obj[vid] = [placement.x, placement.y]

        # Write the JSON out to the requested location
        with open(file_path, "w") as f:
            json.dump(json_obj, f)
        progress.update()

        # Check the dumped object against the schema
        file_format_schemas.validate(json_obj, "placements.json")
        progress.end()

        return file_path, vertex_by_id
Example #2
0
def _ner_route(machine_graph, machine, placements, vector_to_nodes):
    """ Performs routing using rig algorithm

    :param MachineGraph machine_graph: the graph whose edges are routed
    :param ~spinn_machine.Machine machine: the machine to route over
    :param Placements placements: where each vertex has been placed
    :param vector_to_nodes: the vector-to-nodes conversion used by the router
    :return: the multicast routing tables, keyed by partition
    :rtype: MulticastRoutingTableByPartition
    """
    routing_tables = MulticastRoutingTableByPartition()
    progress_bar = ProgressBar(len(machine_graph.vertices), "Routing")

    for source in progress_bar.over(machine_graph.vertices):
        # Route every outgoing multicast partition of this vertex
        partitions = machine_graph.\
            get_multicast_edge_partitions_starting_at_vertex(source)
        for partition in partitions:
            targets = [edge.post_vertex for edge in partition.edges]
            tree = _do_route(
                source, targets, machine, placements, vector_to_nodes)
            # The route starts at the processor the source was placed on
            processor = placements.get_placement_of_vertex(
                partition.pre_vertex).p
            _convert_a_route(
                routing_tables, partition, processor, None, tree)

    progress_bar.end()
    return routing_tables
    def __call__(self, txrx, app_id, all_core_subsets):
        """ Force cores that have not yet finished to generate their
            provenance data, failing fast if any core has crashed.
        """
        # Work out how many cores have not yet reached FINISHED
        total_processors = len(all_core_subsets)
        processors_completed = txrx.get_core_state_count(
            app_id, CPUState.FINISHED)
        left_to_do_cores = total_processors - processors_completed

        progress = ProgressBar(
            left_to_do_cores,
            "Forcing error cores to generate provenance data")

        # Collect the cores stuck in each of the unrecoverable states
        bad_states = (CPUState.RUN_TIME_EXCEPTION, CPUState.WATCHDOG,
                      CPUState.IDLE)
        error_cores, watchdog_cores, idle_cores = (
            txrx.get_cores_in_state(all_core_subsets, state)
            for state in bad_states)

        if error_cores or watchdog_cores or idle_cores:
            raise ConfigurationException(
                "Some cores have crashed. RTE cores {}, watch-dogged cores {},"
                " idle cores {}".format(
                    error_cores.values(), watchdog_cores.values(),
                    idle_cores.values()))

        # All remaining cores should reach FINISHED, which shows that each
        # core has received the message and done its provenance updating
        self._update_provenance(txrx, total_processors, processors_completed,
                                all_core_subsets, app_id, progress)
        progress.end()
Example #4
0
    def __call__(self, machine_graph, machine):
        """ Place a machine graph on a machine using the rig placer. """
        progress_bar = ProgressBar(7, "Placing")

        # Convert the graph and the machine into rig's data model
        vertices_resources, nets, _ = \
            rig_converters.convert_to_rig_graph(machine_graph)
        progress_bar.update()
        rig_machine = rig_converters.convert_to_rig_machine(machine)
        progress_bar.update()

        # Gather machine constraints, then graph constraints on top
        rig_constraints = rig_converters.create_rig_machine_constraints(
            machine)
        progress_bar.update()
        rig_constraints.extend(
            rig_converters.create_rig_graph_constraints(
                machine_graph, rig_machine))
        progress_bar.update()

        # Run rig's place step, then its core allocation step
        rig_placements = place(
            vertices_resources, nets, rig_machine, rig_constraints)
        progress_bar.update()
        rig_allocations = allocate(
            vertices_resources, nets, rig_machine, rig_constraints,
            rig_placements)
        progress_bar.update()

        # Translate the result back into pacman placements
        placements = rig_converters.convert_from_rig_placements(
            rig_placements, rig_allocations, machine_graph)
        progress_bar.update()
        progress_bar.end()

        return placements
    def __java_sys(self, dsg_targets, executable_targets, region_sizes):
        """ Does the Data Specification Execution and loading for system
            cores using Java

        :param DataSpecificationTargets dsg_targets:
            map of placement to file path
        :param ExecutableTargets executable_targets:
            the map between binaries and locations and executable types
        :param dict(tuple(int,int,int),int) region_sizes:
            the coord for region sizes for each core
        :return: map of cores to descriptions of what was written
        :rtype: DsWriteInfo
        """
        progress = ProgressBar(
            4, "Executing data specifications and loading data for system "
            "vertices using Java")

        # Flag which cores are system cores so Java processes only those
        dsg_targets.mark_system_cores(system_cores(executable_targets))
        progress.update()

        # Copy data from WriteMemoryIOData to database
        dw_write_info = self.__java_database(
            dsg_targets, progress, region_sizes)

        self._java.execute_system_data_specification()

        progress.end()
        return dw_write_info
    def __java_app(self, dsg_targets, executable_targets, use_monitors,
                   region_sizes):
        """ Does the Data Specification Execution and loading for
            application cores using Java

        :param DataSpecificationTargets dsg_targets:
        :param ExecutableTargets executable_targets:
        :param bool use_monitors:
        :param dict(tuple(int,int,int),int) region_sizes:
        :return: map of cores to descriptions of what was written
        :rtype: DsWriteInfo
        """
        progress = ProgressBar(
            4, "Executing data specifications and loading data for "
            "application vertices using Java")

        # Mark the system cores so the application pass skips them
        dsg_targets.mark_system_cores(system_cores(executable_targets))
        progress.update()

        # Copy data from WriteMemoryIOData to database
        dw_write_info = self.__java_database(
            dsg_targets, progress, region_sizes)

        # Java needs placements when data is routed via monitor cores
        if use_monitors:
            self._java.set_placements(self._placements, self._txrx)

        self._java.execute_app_data_specification(use_monitors)

        progress.end()
        return dw_write_info
    def __call__(self, machine_graph, machine, file_path):
        """ Convert placement constraints to a constraints.json file.

        :param machine_graph: the machine graph
        :param machine: the machine
        :param file_path: where to write constraints.json
        :return: the file path written and a map of vertex id to vertex
        """
        progress = ProgressBar(
            machine_graph.n_vertices + 2, "creating JSON constraints")

        # Reserve monitor cores first, then walk the graph for constraints
        json_obj = []
        self._add_monitor_core_reserve(json_obj)
        progress.update()
        self._add_extra_monitor_cores(json_obj, machine)
        progress.update()
        vertex_by_id = self._search_graph_for_placement_constraints(
            json_obj, machine_graph, machine, progress)

        # Write out the JSON, then check it against the schema
        with open(file_path, "w") as file_to_write:
            json.dump(json_obj, file_to_write)
        file_format_schemas.validate(json_obj, "constraints.json")

        progress.end()
        return file_path, vertex_by_id
Example #8
0
    def __call__(self, txrx, app_id, all_core_subsets):
        """ Force any cores not yet finished to write their provenance
            data, failing fast if any core has crashed.
        """
        total_processors = len(all_core_subsets)
        processors_completed = txrx.get_core_state_count(
            app_id, CPUState.FINISHED)
        left_to_do_cores = total_processors - processors_completed

        progress = ProgressBar(
            left_to_do_cores,
            "Forcing error cores to generate provenance data")

        # Find cores stuck in states from which they will never finish
        error_cores = txrx.get_cores_in_state(
            all_core_subsets, CPUState.RUN_TIME_EXCEPTION)
        watchdog_cores = txrx.get_cores_in_state(
            all_core_subsets, CPUState.WATCHDOG)
        idle_cores = txrx.get_cores_in_state(
            all_core_subsets, CPUState.IDLE)
        if error_cores or watchdog_cores or idle_cores:
            raise ConfigurationException(
                "Some cores have crashed. RTE cores {}, watch-dogged cores {},"
                " idle cores {}".format(error_cores.values(),
                                        watchdog_cores.values(),
                                        idle_cores.values()))

        # Wait for the rest to reach FINISHED, which shows each core has
        # received the message and done its provenance updating
        self._update_provenance(txrx, total_processors, processors_completed,
                                all_core_subsets, app_id, progress)
        progress.end()
Example #9
0
    def __call__(self, executable_targets, app_id, transceiver,
                 loaded_application_data_token,
                 generated_connectors_on_machine_token):
        """ Go through the executable targets and load each binary to \
            everywhere and then send a start request to the cores that \
            actually use it

        :param executable_targets: the binaries and the cores they run on
        :param app_id: the application id to load under
        :param transceiver: the transceiver to load with
        :param loaded_application_data_token:
            True only when application data has been loaded
        :param generated_connectors_on_machine_token:
            True only when on-machine connector generation has completed
        :return: True once load and start requests have been issued
        :raises ConfigurationException: if a precondition token is False
        """
        if not loaded_application_data_token:
            raise ConfigurationException(
                "The token for having loaded the application data token is set"
                " to false and therefore I cannot run. Please fix and try "
                "again")

        if not generated_connectors_on_machine_token:
            # NOTE(review): this previously raised
            # "exceptions.ConfigurationException", inconsistent with the
            # raise above (and a NameError if "exceptions" is not imported);
            # use the same exception type for both precondition failures
            raise ConfigurationException(
                "The token for generating connectors on machine token is set"
                " to false and therefore I cannot run. Please fix and try "
                "again")

        progress = ProgressBar(executable_targets.total_processors + 1,
                               "Loading executables onto the machine")

        # Load each binary onto all the cores that use it
        for binary in executable_targets.binaries:
            progress.update(
                self._launch_binary(executable_targets, binary, transceiver,
                                    app_id))

        # Send the start request to everything just loaded
        self._start_simulation(executable_targets, transceiver, app_id)
        progress.update()
        progress.end()

        return True
    def _write_router_provenance_data(self, router_tables, machine, txrx):
        """ Writes the provenance data of the router diagnostics

        :param router_tables: the routing tables generated by pacman
        :param machine: the spinnMachine object
        :param txrx: the transceiver object
        :return: the gathered provenance items
        """
        progress = ProgressBar(machine.n_chips * 2,
                               "Getting Router Provenance")

        items = []
        seen_chips = set()

        # First pass: chips that have routing tables, in (x, y) order
        tables = sorted(router_tables.routing_tables,
                        key=lambda table: (table.x, table.y))
        for table in tables:
            self._write_router_table_diagnostic(
                txrx, machine, table.x, table.y, seen_chips, table, items)
            progress.update()

        # Second pass: every chip, skipping those already handled above
        for chip in sorted(machine.chips, key=lambda c: (c.x, c.y)):
            self._write_router_chip_diagnostic(txrx, chip, seen_chips, items)
            progress.update()

        progress.end()
        return items
Example #11
0
    def __call__(self, nengo_operator_graph, random_number_generator, seed):
        """ Insert interposers into the operator graph and then stack them.

        :param nengo_operator_graph: the operator graph to optimise
        :param random_number_generator: the random number generator to use
        :param seed: the seed used when stacking interposers
        :return: the optimised application graph
        """
        progress_bar = ProgressBar(
            total_number_of_things_to_do=2,
            string_describing_what_being_progressed=(
                "Inserting Interposers where suitable."))

        # add interposers as required
        inserted, graph_with_interposers = self._insert_interposers(
            nengo_operator_graph, random_number_generator)
        progress_bar.update()

        # stack interposers where possible to compress the graph
        optimised_graph = self._stack_interposers(
            graph_with_interposers, inserted, random_number_generator,
            seed=seed)
        progress_bar.update()
        progress_bar.end()

        return optimised_graph
Example #12
0
    def __call__(self,
                 transceiver,
                 tags=None,
                 iptags=None,
                 reverse_iptags=None):
        """ Clear all tags on the machine and then load the given tags.

        :param transceiver: the transceiver object
        :param tags: the tags object which contains IP and reverse IP tags;
            could be None if these are being given in separate lists
        :param iptags: a list of IP tags, given when tags is None
        :param reverse_iptags: a list of reverse IP tags when tags is None
        """
        # clear all the tags from the Ethernet connection, as nothing should
        # be allowed to use it (no two apps should use the same Ethernet
        # connection at the same time)
        progress = ProgressBar(MAX_TAG_ID, "Clearing tags")
        for tag_id in progress.over(range(MAX_TAG_ID)):
            transceiver.clear_ip_tag(tag_id)

        # Use tags object to supply tag info if it is supplied
        if tags is not None:
            iptags = list(tags.ip_tags)
            reverse_iptags = list(tags.reverse_ip_tags)
        else:
            # Robustness: treat missing lists as empty rather than crashing
            # with a TypeError on len(None) below
            iptags = iptags if iptags is not None else []
            reverse_iptags = (
                reverse_iptags if reverse_iptags is not None else [])

        # Load the IP tags and the Reverse IP tags
        progress = ProgressBar(
            len(iptags) + len(reverse_iptags), "Loading Tags")
        self.load_iptags(iptags, transceiver, progress)
        self.load_reverse_iptags(reverse_iptags, transceiver, progress)
        progress.end()
    def __call__(self, machine_graph, machine, file_path):
        """ Convert placement constraints into a constraints.json file.

        :param machine_graph: the machine graph
        :param machine: the machine
        :param file_path: where to write constraints.json
        :return: the file path written and a map of vertex id to vertex
        """
        progress = ProgressBar(
            machine_graph.n_vertices + 2, "creating JSON constraints")

        json_obj = []

        # Reserve the monitor cores before walking the graph itself
        self._add_monitor_core_reserve(json_obj)
        progress.update()
        self._add_extra_monitor_cores(json_obj, machine)
        progress.update()
        vertex_by_id = self._search_graph_for_placement_constraints(
            json_obj, machine_graph, machine, progress)

        # Dump the constraints, then validate them against the schema
        with open(file_path, "w") as out:
            json.dump(json_obj, out)
        file_format_schemas.validate(json_obj, "constraints.json")

        progress.end()
        return file_path, vertex_by_id
    def __java_all(self, dsg_targets):
        """ Does the Data Specification Execution and loading using Java

        :param dsg_targets: map of placement to file path
        :type dsg_targets: \
            :py:class:`spinn_front_end_common.interface.ds.DataSpecificationTargets`
        :return: map of cores to a dict of \
            'start_address', 'memory_used', 'memory_written'
        :rtype: spinn_front_end_common.interface.ds.ds_write_info.DsWriteInfo
        """
        progress = ProgressBar(
            3, "Executing data specifications and loading data using Java")

        # Copy data from WriteMemoryIOData to database
        dw_write_info = DsWriteInfo(dsg_targets.get_database())
        dw_write_info.clear_write_info()
        if self._write_info_map is not None:
            for core, info in iteritems(self._write_info_map):
                dw_write_info[core] = info
        progress.update()

        # Tell the Java tools where everything lives
        dsg_targets.set_app_id(self._app_id)
        self._java.set_machine(self._machine)
        self._java.set_report_folder(self._db_folder)
        progress.update()

        # Run the data specification execution in Java
        self._java.execute_data_specification()

        progress.end()
        return dw_write_info
    def __java_app(self, dsg_targets, executable_targets, use_monitors):
        """ Does the Data Specification Execution and loading for
            application cores using Java

        :param dsg_targets: map of placement to file path
        :param executable_targets: the binaries and their locations
        :param use_monitors: whether to route data via monitor cores
        :return: map of cores to descriptions of what was written
        """
        progress = ProgressBar(
            4, "Executing data specifications and loading data for "
            "application vertices using Java")

        # Mark the system cores so this pass only touches application cores
        dsg_targets.mark_system_cores(system_cores(executable_targets))
        progress.update()

        # Copy data from WriteMemoryIOData to database
        dw_write_info = DsWriteInfo(dsg_targets.get_database())
        dw_write_info.clear_write_info()
        if self._write_info_map is not None:
            for core, info in iteritems(self._write_info_map):
                dw_write_info[core] = info
        progress.update()

        # Tell the Java tools where everything lives
        dsg_targets.set_app_id(self._app_id)
        self._java.set_machine(self._machine)
        self._java.set_report_folder(self._db_folder)
        # Java needs placements when data is routed via monitor cores
        if use_monitors:
            self._java.set_placements(self._placements, self._txrx)
        progress.update()

        self._java.execute_app_data_specification(use_monitors)

        progress.end()
        return dw_write_info
    def __call__(self, placements, file_path):
        """ Convert memory placements into a placements.json file.

        :param placements: the memory placements object
        :param file_path: the file path for the placements.json
        :return: file path for the placements.json and a map from vertex
            identifier back to vertex
        """
        progress = ProgressBar(placements.n_placements + 1,
                               "converting to JSON placements")

        # One pass builds both the JSON object and the id -> vertex map
        json_obj = {}
        vertex_by_id = {}
        for placement in progress.over(placements, False):
            vertex_id = ident(placement.vertex)
            vertex_by_id[vertex_id] = placement.vertex
            json_obj[vertex_id] = [placement.x, placement.y]

        # Dump the JSON to the requested location
        with open(file_path, "w") as out:
            json.dump(json_obj, out)
        progress.update()

        # Validate the dumped object against the schema
        file_format_schemas.validate(json_obj, "placements.json")
        progress.end()

        return file_path, vertex_by_id
    def __call__(self, machine_graph, machine, plan_n_timesteps, placements):
        """ Route a machine graph, multicast only, using rig's router. """
        progress_bar = ProgressBar(7, "Routing")

        # Convert graph, machine, constraints and placements into rig form
        vertices_resources, nets, net_names = \
            convert_to_rig_graph_pure_mc(machine_graph, plan_n_timesteps)
        progress_bar.update()
        rig_machine = convert_to_rig_machine(machine)
        progress_bar.update()
        rig_constraints = create_rig_machine_constraints(machine)
        progress_bar.update()
        rig_constraints.extend(create_rig_graph_constraints(
            machine_graph, machine))
        progress_bar.update()
        rig_placements, rig_allocations = convert_to_rig_placements(
            placements, machine)
        progress_bar.update()

        # Run rig's router, then re-key its routes by partition name
        rig_routes = route(
            vertices_resources, nets, rig_machine, rig_constraints,
            rig_placements, rig_allocations, "cores")
        rig_routes = {
            name: rig_routes[net] for net, name in iteritems(net_names)}
        progress_bar.update()

        # Convert the routes back into pacman's data structures
        routes = convert_from_rig_routes(rig_routes)
        progress_bar.update()
        progress_bar.end()

        return routes
Example #18
0
    def __call__(self,
                 transceiver,
                 tags=None,
                 iptags=None,
                 reverse_iptags=None):
        """
        :param ~spinnman.transceiver.Transceiver transceiver:
        :param ~.Tags tags:
        :param list(~spinn_machine.tags.IPTag) iptags:
        :param list(~spinn_machine.tags.ReverseIPTag) reverse_iptags:
        """
        # Wipe every tag on the Ethernet connection first, as nothing should
        # be allowed to use it (no two apps should use the same Ethernet
        # connection at the same time)
        progress = ProgressBar(MAX_TAG_ID, "Clearing tags")
        for tag_id in progress.over(range(MAX_TAG_ID)):
            transceiver.clear_ip_tag(tag_id)

        # A tags object, when given, overrides the separate lists
        if tags is not None:
            iptags = list(tags.ip_tags)
            reverse_iptags = list(tags.reverse_ip_tags)

        # Load both kinds of tag under a fresh progress bar
        progress = ProgressBar(
            len(iptags) + len(reverse_iptags), "Loading Tags")
        self.load_iptags(iptags, transceiver, progress)
        self.load_reverse_iptags(reverse_iptags, transceiver, progress)
        progress.end()
    def __call__(self, placements, file_path):
        """ Convert placements into a core_allocations.json file.

        :param placements: the placements to convert
        :param file_path: where to write core_allocations.json
        :return: the file path written and a map of vertex id to vertex
        """
        progress = ProgressBar(len(placements) + 1,
                               "Converting to JSON core allocations")

        # The JSON object starts with its type marker
        json_obj = OrderedDict()
        json_obj['type'] = "cores"
        vertex_by_id = OrderedDict()

        # Convert each placement in turn
        for placement in progress.over(placements, False):
            self._convert_placement(placement, vertex_by_id, json_obj)

        # Write the JSON out to the requested location
        with open(file_path, "w") as out:
            json.dump(json_obj, out)
        progress.update()

        # Validate against the schema before declaring success
        file_format_schemas.validate(json_obj, "core_allocations.json")
        progress.end()

        return file_path, vertex_by_id
    def __call__(
            self, transceiver, tags=None, iptags=None, reverse_iptags=None):
        """
        :param transceiver: the transceiver object
        :param tags: the tags object which contains IP and reverse IP tags;
            could be None if these are being given in separate lists
        :param iptags: a list of IP tags, given when tags is None
        :param reverse_iptags: a list of reverse IP tags when tags is None
        """
        # Wipe every tag on the Ethernet connection, as nothing should be
        # allowed to use it (no two apps should use the same Ethernet
        # connection at the same time)
        progress = ProgressBar(MAX_TAG_ID, "Clearing tags")
        for tag_id in progress.over(range(MAX_TAG_ID)):
            transceiver.clear_ip_tag(tag_id)

        # A tags object, when given, overrides the separate lists
        if tags is not None:
            iptags = list(tags.ip_tags)
            reverse_iptags = list(tags.reverse_ip_tags)

        # Load both kinds of tag under a fresh progress bar
        progress = ProgressBar(
            len(iptags) + len(reverse_iptags), "Loading Tags")
        self.load_iptags(iptags, transceiver, progress)
        self.load_reverse_iptags(reverse_iptags, transceiver, progress)
        progress.end()
    def __call__(self, txrx, app_id, all_core_subsets):
        """ Force cores that have not finished to write their provenance
            data, retrying a limited number of times.

        :param txrx: the transceiver
        :param app_id: the application id the cores run under
        :param all_core_subsets: the cores to check
        :raises ConfigurationException: if any core has crashed
        """
        # Retry limit before giving up on stragglers (was a magic 10)
        max_attempts = 10

        # check that the right number of processors are in sync
        processors_completed = txrx.get_core_state_count(
            app_id, CPUState.FINISHED)
        total_processors = len(all_core_subsets)
        left_to_do_cores = total_processors - processors_completed

        progress = ProgressBar(
            left_to_do_cores,
            "Forcing error cores to generate provenance data")

        error_cores = txrx.get_cores_in_state(all_core_subsets,
                                              CPUState.RUN_TIME_EXCEPTION)
        watchdog_cores = txrx.get_cores_in_state(all_core_subsets,
                                                 CPUState.WATCHDOG)
        idle_cores = txrx.get_cores_in_state(all_core_subsets, CPUState.IDLE)

        # Truthiness replaces the explicit len(...) != 0 comparisons
        if error_cores or watchdog_cores or idle_cores:
            raise ConfigurationException(
                "Some cores have crashed. RTE cores {}, watch-dogged cores {},"
                " idle cores {}".format(error_cores.values(),
                                        watchdog_cores.values(),
                                        idle_cores.values()))

        # check that all cores are in the state FINISHED which shows that
        # the core has received the message and done provenance updating
        attempts = 0
        while processors_completed != total_processors and \
                attempts < max_attempts:
            attempts += 1
            unsuccessful_cores = txrx.get_cores_not_in_state(
                all_core_subsets, CPUState.FINISHED)

            # Ask each unfinished core to update provenance and exit.
            # NOTE(review): was "iterkeys()", a Python 2-only dict method;
            # iterating the mapping directly is equivalent on both versions.
            for (x, y, p) in unsuccessful_cores:
                data = struct.pack(
                    "<I", SDP_RUNNING_MESSAGE_CODES.
                    SDP_UPDATE_PROVENCE_REGION_AND_EXIT.value)
                txrx.send_sdp_message(
                    SDPMessage(SDPHeader(flags=SDPFlag.REPLY_NOT_EXPECTED,
                                         destination_cpu=p,
                                         destination_chip_x=x,
                                         destination_port=SDP_PORTS.
                                         RUNNING_COMMAND_SDP_PORT.value,
                                         destination_chip_y=y),
                               data=data))

            processors_completed = txrx.get_core_state_count(
                app_id, CPUState.FINISHED)

            # Advance the progress bar by however many cores just finished
            left_over_now = total_processors - processors_completed
            to_update = left_to_do_cores - left_over_now
            left_to_do_cores = left_over_now
            if to_update != 0:
                progress.update(to_update)
        if attempts >= max_attempts:
            logger.error("Unable to Finish getting provenance data. "
                         "Abandoned after too many retries. "
                         "Board may be left in an unstable state!")

        progress.end()
Example #22
0
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """ Place a machine graph on a machine using the rig placer. """
        progress_bar = ProgressBar(7, "Placing")

        # Convert the graph and the machine into rig's data model
        vertices_resources, nets, _ = \
            convert_to_rig_graph(machine_graph, plan_n_timesteps)
        progress_bar.update()
        rig_machine = convert_to_rig_machine(machine)
        progress_bar.update()

        # Gather machine constraints, then graph constraints on top
        rig_constraints = create_rig_machine_constraints(machine)
        progress_bar.update()
        rig_constraints.extend(create_rig_graph_constraints(
            machine_graph, rig_machine))
        progress_bar.update()

        # Run rig's place step, then its core allocation step
        rig_placements = place(
            vertices_resources, nets, rig_machine, rig_constraints)
        progress_bar.update()
        rig_allocations = allocate(
            vertices_resources, nets, rig_machine, rig_constraints,
            rig_placements)
        progress_bar.update()

        # Translate the result back into pacman placements
        placements = convert_from_rig_placements(
            rig_placements, rig_allocations, machine_graph)
        progress_bar.update()
        progress_bar.end()

        return placements
Example #23
0
    def __call__(self, machine_graph, file_path):
        """ Convert a machine graph into a machine_graph.json file.

        :param machine_graph: The graph to convert
        :param file_path: Where to write the JSON
        :return: the file path, the vertex-id map and the partition-id map
        """
        progress = ProgressBar(machine_graph.n_vertices + 1,
                               "Converting to JSON graph")

        # Skeleton of the JSON document: resources and edges sections
        json_graph = OrderedDict()
        vertices = OrderedDict()
        json_graph["vertices_resources"] = vertices
        edges = OrderedDict()
        json_graph["edges"] = edges

        # Convert each vertex, tracking ids for the returned lookup maps
        vertex_by_id = OrderedDict()
        partition_by_id = OrderedDict()
        for vertex in progress.over(machine_graph.vertices, False):
            self._convert_vertex(vertex, vertex_by_id, vertices, edges,
                                 machine_graph, partition_by_id)

        # Write the JSON out, then validate against the schema
        with open(file_path, "w") as out:
            json.dump(json_graph, out)
        progress.update()
        file_format_schemas.validate(json_graph, "machine_graph.json")
        progress.end()

        return file_path, vertex_by_id, partition_by_id
def synapse_expander(
        app_graph, graph_mapper, placements, transceiver,
        provenance_file_path, executable_finder):
    """ Run the synapse expander - needs to be done after data has been loaded

    :param app_graph: the application graph to expand synapses for
    :param graph_mapper: the mapping from application to machine vertices
    :param placements: where the machine vertices have been placed
    :param transceiver: how to talk to the machine
    :param provenance_file_path: where to write any extracted IOBUF
    :param executable_finder: how to locate the expander binaries
    :raises SpynnakerException: if the expander fails to complete
    """
    # NOTE(review): these locals used to shadow the function's own name
    # ("synapse_expander"); renamed so the name stays unambiguous in scope
    synapse_bin = executable_finder.get_executable_path(SYNAPSE_EXPANDER)
    delay_bin = executable_finder.get_executable_path(DELAY_EXPANDER)

    progress = ProgressBar(len(app_graph.vertices) + 2, "Expanding Synapses")

    # Find the places where the synapse expander and delay receivers should run
    expander_cores = ExecutableTargets()
    for vertex in progress.over(app_graph.vertices, finish_at_end=False):

        # Find population vertices
        if isinstance(
                vertex, (AbstractPopulationVertex, DelayExtensionVertex)):

            # Add all machine vertices of the population vertex to ones
            # that need synapse expansion
            for m_vertex in graph_mapper.get_machine_vertices(vertex):
                vertex_slice = graph_mapper.get_slice(m_vertex)
                if vertex.gen_on_machine(vertex_slice):
                    placement = placements.get_placement_of_vertex(m_vertex)
                    if isinstance(vertex, AbstractPopulationVertex):
                        binary = synapse_bin
                    else:
                        binary = delay_bin
                    expander_cores.add_processor(
                        binary, placement.x, placement.y, placement.p)

    # Launch the delay receivers
    expander_app_id = transceiver.app_id_tracker.get_new_id()
    transceiver.execute_application(expander_cores, expander_app_id)
    progress.update()

    # Wait for everything to finish
    finished = False
    try:
        transceiver.wait_for_cores_to_be_in_state(
            expander_cores.all_core_subsets, expander_app_id,
            [CPUState.FINISHED])
        progress.update()
        finished = True
        _extract_iobuf(expander_cores, transceiver, provenance_file_path)
        progress.end()
    except Exception:  # pylint: disable=broad-except
        # Best-effort diagnostics; the failure is still reported below
        logger.exception("Synapse expander has failed")
        _handle_failure(
            expander_cores, transceiver, provenance_file_path)
    finally:
        # Always free the machine resources, whether we succeeded or not
        transceiver.stop_application(expander_app_id)
        transceiver.app_id_tracker.free_id(expander_app_id)

        if not finished:
            raise SpynnakerException(
                "The synapse expander failed to complete")
    def __call__(self, placements, app_graph, executable_finder, transceiver,
                 machine_graph, routing_infos):
        """ Loads and runs the bit field generator on chip.

        :param ~pacman.model.placements.Placements placements: placements
        :param ~pacman.model.graphs.application.ApplicationGraph app_graph:
            the app graph
        :param executable_finder: the executable finder
        :type executable_finder:
            ~spinn_front_end_common.utilities.utility_objs.ExecutableFinder
        :param ~spinnman.transceiver.Transceiver transceiver:
            the SpiNNMan instance
        :param ~pacman.model.graphs.machine.MachineGraph machine_graph:
            the machine graph
        :param ~pacman.model.routing_info.RoutingInfo routing_infos:
            the key to edge map
        """
        self.__txrx = transceiver
        self.__placements = placements
        self.__aplx = executable_finder.get_executable_path(
            self._BIT_FIELD_EXPANDER_APLX)

        # One step per vertex in each graph, plus one for the load itself
        progress = ProgressBar(
            app_graph.n_vertices + machine_graph.n_vertices + 1,
            "Running bitfield generation on chip")

        # Work out which cores need the expander and get an app id for it
        expander_cores = self._calculate_core_data(app_graph, progress)
        bit_field_app_id = transceiver.app_id_tracker.get_new_id()
        progress.update(1)

        # Run the expander application and wait for it to complete
        system_control_logic.run_system_application(
            expander_cores,
            bit_field_app_id,
            transceiver,
            executable_finder,
            get_config_bool("Reports", "write_bit_field_iobuf"),
            self.__check_for_success, [CPUState.FINISHED],
            False,
            "bit_field_expander_on_{}_{}_{}.txt",
            progress_bar=progress)
        progress.end()

        # Read the bit fields back in for debugging purposes, if asked to
        if get_config_bool("Reports", "generate_bit_field_report"):
            report_dir = report_default_directory()
            self._full_report_bit_fields(
                app_graph,
                os.path.join(report_dir, self._BIT_FIELD_REPORT_FILENAME))
            self._summary_report_bit_fields(
                app_graph,
                os.path.join(
                    report_dir, self._BIT_FIELD_SUMMARY_REPORT_FILENAME))
Example #26
0
def synapse_expander(
        app_graph, graph_mapper, placements, transceiver,
        provenance_file_path, executable_finder):
    """ Run the synapse expander - needs to be done after data has been loaded

    Collects the machine cores whose vertices request on-machine generation,
    runs the synapse/delay expander binaries on them, waits for completion,
    and extracts IOBUF output.

    :param app_graph: the application graph whose vertices may need expansion
    :param graph_mapper: maps application vertices to their machine vertices
    :param placements: where each machine vertex has been placed
    :param transceiver: the transceiver used to talk to the machine
    :param provenance_file_path: directory for IOBUF / failure output
    :param executable_finder: used to locate the expander binaries
    :raises SpynnakerException: if the expander fails to complete
    """

    synapse_expander = executable_finder.get_executable_path(SYNAPSE_EXPANDER)
    delay_expander = executable_finder.get_executable_path(DELAY_EXPANDER)

    # +2: one tick for launching, one for completion
    progress = ProgressBar(len(app_graph.vertices) + 2, "Expanding Synapses")

    # Find the places where the synapse expander and delay receivers should run
    expander_cores = ExecutableTargets()
    for vertex in progress.over(app_graph.vertices, finish_at_end=False):

        # Find population vertices
        if isinstance(
                vertex, (AbstractPopulationVertex, DelayExtensionVertex)):

            # Add all machine vertices of the population vertex to ones
            # that need synapse expansion
            for m_vertex in graph_mapper.get_machine_vertices(vertex):
                vertex_slice = graph_mapper.get_slice(m_vertex)
                if vertex.gen_on_machine(vertex_slice):
                    placement = placements.get_placement_of_vertex(m_vertex)
                    # population vertices get the synapse expander binary;
                    # delay extensions get the delay expander binary
                    if isinstance(vertex, AbstractPopulationVertex):
                        binary = synapse_expander
                    else:
                        binary = delay_expander
                    expander_cores.add_processor(
                        binary, placement.x, placement.y, placement.p)

    # Launch the delay receivers
    expander_app_id = transceiver.app_id_tracker.get_new_id()
    transceiver.execute_application(expander_cores, expander_app_id)
    progress.update()

    # Wait for everything to finish
    finished = False
    try:
        transceiver.wait_for_cores_to_be_in_state(
            expander_cores.all_core_subsets, expander_app_id,
            [CPUState.FINISHED])
        progress.update()
        finished = True
        _extract_iobuf(expander_cores, transceiver, provenance_file_path)
        progress.end()
    except Exception:
        # best-effort diagnostics; the SpynnakerException below reports
        # the failure to the caller
        logger.exception("Synapse expander has failed")
        _handle_failure(
            expander_cores, transceiver, provenance_file_path)
    finally:
        # always tear down the expander application and recycle its ID
        transceiver.stop_application(expander_app_id)
        transceiver.app_id_tracker.free_id(expander_app_id)

        if not finished:
            raise SpynnakerException(
                "The synapse expander failed to complete")
    def __call__(self, app_id, txrx, executable_types):
        """
        :param int app_id:
        :param ~spinnman.transceiver.Transceiver txrx:
        :param dict(ExecutableType,~spinn_machine.CoreSubsets) \
                executable_types:
        :raises ExecutableFailedToStopException:
        """
        # Only cores using the simulation interface need to be waited for
        sim_cores = executable_types[ExecutableType.USES_SIMULATION_INTERFACE]
        n_cores = len(sim_cores)

        progress = ProgressBar(
            n_cores,
            "Turning off all the cores within the simulation")

        # Poll the machine until every core reports FINISHED
        n_finished = txrx.get_core_state_count(app_id, CPUState.FINISHED)
        last_reported = n_finished

        while n_finished != n_cores:
            # Only tick the bar for newly-finished cores
            if n_finished > last_reported:
                progress.update(n_finished - last_reported)
                last_reported = n_finished

            # Any core in an error state aborts the wait
            n_rte = txrx.get_core_state_count(
                app_id, CPUState.RUN_TIME_EXCEPTION)
            n_watchdog = txrx.get_core_state_count(
                app_id, CPUState.WATCHDOG)
            if n_rte > 0 or n_watchdog > 0:
                raise ExecutableFailedToStopException(
                    "{} of {} processors went into an error state when"
                    " shutting down".format(n_rte + n_watchdog, n_cores))

            # Gather provenance from any core not yet finished
            finished_map = txrx.get_cores_in_state(
                sim_cores, CPUState.FINISHED)
            for subset in sim_cores:
                for proc_id in subset.processor_ids:
                    if not finished_map.is_core(
                            subset.x, subset.y, proc_id):
                        self._update_provenance_and_exit(
                            txrx, proc_id, subset)
            time.sleep(0.5)

            n_finished = txrx.get_core_state_count(
                app_id, CPUState.FINISHED)

        progress.end()
    def __call__(self, machine_graph, n_keys_map, graph_mapper=None):
        """ Allocate routing keys for every outgoing edge partition.

        :param machine_graph: the machine graph to allocate keys for
        :param n_keys_map: map from partition to the number of keys needed
        :param graph_mapper: appears unused; kept for interface compatibility
        :return: the allocated routing information
        """
        # Refuse to run if any partition carries an unsupported constraint
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint,
                FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint, ShareKeyConstraint],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # Constraints must be singular per edge and mutually compatible
        check_types_of_edge_constraint(machine_graph)

        infos = RoutingInfo()

        # Bucket the partitions by the kind of key requirement they carry
        (fixed_keys, shared_keys, fixed_masks, fixed_fields, flexi_fields,
         continuous, noncontinuous) = get_edge_groups(
             machine_graph, EdgeTrafficType.MULTICAST)

        bar = ProgressBar(
            machine_graph.n_outgoing_edge_partitions,
            "Allocating routing keys")

        # Most-restricted groups are allocated first
        for group in bar.over(fixed_keys, False):
            self._allocate_fixed_keys(group, infos)
        for group in bar.over(fixed_masks, False):
            self._allocate_fixed_masks(group, n_keys_map, infos)
        for group in bar.over(fixed_fields, False):
            self._allocate_fixed_fields(group, n_keys_map, infos)

        if flexi_fields:
            raise PacmanConfigurationException(
                "MallocBasedRoutingInfoAllocator does not support FlexiField")

        for group in bar.over(shared_keys, False):
            self._allocate_share_key(group, infos, n_keys_map)

        # Everything else is allocated from the general pool
        for group in continuous:
            self._allocate_other_groups(
                group, infos, n_keys_map, continuous=True)
        for group in noncontinuous:
            self._allocate_other_groups(
                group, infos, n_keys_map, continuous=False)

        bar.end()
        return infos
    def __call__(self, machine_graph, n_keys_map):
        """
        :param MachineGraph machine_graph:
        :param AbstractMachinePartitionNKeysMap n_keys_map:
        :rtype: RoutingInfo
        :raises PacmanRouteInfoAllocationException:
        """
        self._n_keys_map = n_keys_map

        # Fail fast if a partition carries a constraint we cannot honour
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint, FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint, ShareKeyConstraint
            ],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # Constraints must be singular per edge and mutually compatible
        check_types_of_edge_constraint(machine_graph)

        allocations = RoutingInfo()

        # Partition the edges by the kind of key requirement they have
        (fixed_keys, shared_keys, fixed_masks, fixed_fields, continuous,
         noncontinuous) = get_mulitcast_edge_groups(machine_graph)

        bar = ProgressBar(machine_graph.n_outgoing_edge_partitions,
                          "Allocating routing keys")

        # Constrained groups first, from most to least restrictive
        for group in bar.over(fixed_keys, False):
            self._allocate_fixed_keys(group, allocations)
        for group in bar.over(fixed_masks, False):
            self._allocate_fixed_masks(group, allocations)
        for group in bar.over(fixed_fields, False):
            self._allocate_fixed_fields(group, allocations)
        for group in bar.over(shared_keys, False):
            self._allocate_share_key(group, allocations)

        # Remaining groups come from the general allocator
        for group in continuous:
            self._allocate_other_groups(group, allocations, True)
        for group in noncontinuous:
            self._allocate_other_groups(group, allocations, False)

        bar.end()
        return allocations
    def __call__(self, machine_graph, n_keys_map, graph_mapper=None):
        """ Allocate routing keys, treating all plain groups as continuous.

        :param machine_graph: the machine graph to allocate keys for
        :param n_keys_map: map from partition to the number of keys needed
        :param graph_mapper: appears unused; kept for interface compatibility
        :return: the allocated routing information
        """
        # Refuse to run if any partition carries an unsupported constraint
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint, FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint, ShareKeyConstraint
            ],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # Constraints must be singular per edge and mutually compatible
        utilities.check_types_of_edge_constraint(machine_graph)

        infos = RoutingInfo()

        # Bucket the partitions by the kind of key requirement they carry
        (fixed_keys, shared_keys, fixed_masks, fixed_fields, flexi_fields,
         continuous,
         noncontinuous) = utilities.get_edge_groups(machine_graph,
                                                    EdgeTrafficType.MULTICAST)

        # Even non-continuous keys will be continuous
        continuous.extend(noncontinuous)

        bar = ProgressBar(machine_graph.n_outgoing_edge_partitions,
                          "Allocating routing keys")

        # Constrained groups first, from most to least restrictive
        for group in bar.over(fixed_keys, False):
            self._allocate_fixed_keys(group, infos)
        for group in bar.over(fixed_masks, False):
            self._allocate_fixed_masks(group, n_keys_map, infos)
        for group in bar.over(fixed_fields, False):
            self._allocate_fixed_fields(group, n_keys_map, infos)

        if flexi_fields:
            raise PacmanConfigurationException(
                "MallocBasedRoutingInfoAllocator does not support FlexiField")

        for group in bar.over(shared_keys, False):
            self._allocate_share_key(group, infos, n_keys_map)

        # All remaining groups are now continuous
        for group in continuous:
            self._allocate_continuous_groups(group, infos, n_keys_map)

        bar.end()
        return infos
    def __call__(self, executable_targets, app_id, transceiver):
        """ Flood-fill every binary onto its cores, then start them all.

        :param executable_targets: which binaries go on which cores
        :param app_id: the application ID to load under
        :param transceiver: the transceiver used to talk to the machine
        """
        # +1 covers the final start-simulation step
        bar = ProgressBar(
            executable_targets.total_processors + 1,
            "Loading executables onto the machine")

        # Each launch reports how many cores it loaded
        for binary in executable_targets.binaries:
            n_loaded = self._launch_binary(
                executable_targets, binary, transceiver, app_id)
            bar.update(n_loaded)

        self._start_simulation(executable_targets, transceiver, app_id)
        bar.update()
        bar.end()
    def __call__(self, executable_targets, app_id, transceiver):
        """ Load all application binaries and kick off the simulation.

        :param executable_targets: which binaries go on which cores
        :param app_id: the application ID to load under
        :param transceiver: the transceiver used to talk to the machine
        """
        progress = ProgressBar(executable_targets.total_processors + 1,
                               "Loading executables onto the machine")

        # Tick the bar by the number of cores each binary was loaded onto
        for exe in executable_targets.binaries:
            progress.update(self._launch_binary(
                executable_targets, exe, transceiver, app_id))

        # Final tick accounts for starting the simulation itself
        self._start_simulation(executable_targets, transceiver, app_id)
        progress.update()
        progress.end()
    def __call__(self, machine_graph, placements, buffer_manager):
        """ Pull the recorded buffers off the machine after a run.

        :param machine_graph: the machine graph whose regions to read
        :param placements: appears unused here; kept for interface parity
        :param buffer_manager: does the actual data extraction
        """
        # Work out how much there is to extract before starting
        region_count, extraction_vertices = self._count_regions(machine_graph)

        bar = ProgressBar(
            region_count, "Extracting buffers from the last run")
        # Always close the bar, even if extraction raises
        try:
            buffer_manager.get_data_for_vertices(extraction_vertices, bar)
        finally:
            bar.end()
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """ Place a machine graph, radially outwards from the boot chip.

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type plan_n_timesteps: int
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: \
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Split the vertices: constrained ones keep their order in a list,
        # unconstrained ones go in a set for subgraph-driven placement
        placements = Placements()
        constrained_verts = list()
        free_verts = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(
                    vertex.constraints, AbstractPlacerConstraint):
                constrained_verts.append(vertex)
            else:
                free_verts.add(vertex)

        progress = ProgressBar(
            machine_graph.n_vertices, "Placing graph vertices")
        tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))

        # Constrained vertices go first, hardest constraints first
        constrained_verts = sort_vertices_by_known_constraints(
            constrained_verts)
        for vertex in progress.over(constrained_verts, False):
            self._place_vertex(vertex, tracker, machine, placements)

        # Then repeatedly seed from the most-connected remaining vertex
        while free_verts:
            seed = self._find_max_connected_vertex(free_verts, machine_graph)
            self._place_unconstrained_subgraph(
                seed, machine_graph, free_verts, machine, placements,
                tracker, progress)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
    def load_initial_buffers(self):
        """ Load the initial buffers for the senders using memory writes.
        """
        # The total buffer size across all regions drives the bar length
        data_size = sum(
            vertex.get_region_buffer_size(region)
            for vertex in self._sender_vertices
            for region in vertex.get_regions())

        bar = ProgressBar(data_size, "Loading buffers")
        for vertex in self._sender_vertices:
            for region in vertex.get_regions():
                self._send_initial_messages(vertex, region, bar)
        bar.end()
    def _run(self, machine_graph, machine, plan_n_timesteps):
        """
        :param MachineGraph machine_graph: The machine_graph to place
        :param ~spinn_machine.Machine machine:
            The machine with respect to which to partition the application
            graph
        :param int plan_n_timesteps: number of timesteps to plan for
        :return: A set of placements
        :rtype: ~pacman.model.placements.Placements
        :raise PacmanPlaceException:
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Split the vertices: constrained ones keep their order in a list,
        # unconstrained ones go in a set for subgraph-driven placement
        placements = Placements()
        constrained_verts = list()
        free_verts = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(vertex.constraints,
                                          AbstractPlacerConstraint):
                constrained_verts.append(vertex)
            else:
                free_verts.add(vertex)

        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))

        # Vertices constrained to share a chip must be placed together
        same_chip_groups = get_same_chip_vertex_groups(machine_graph)

        # Constrained vertices go first, hardest constraints first
        constrained_verts = sort_vertices_by_known_constraints(
            constrained_verts)
        for vertex in progress.over(constrained_verts, False):
            self._place_vertex(vertex, tracker, machine, placements,
                               same_chip_groups, machine_graph)

        # Then repeatedly seed from the most-connected remaining vertex
        while free_verts:
            seed = self._find_max_connected_vertex(free_verts, machine_graph)
            self._place_unconstrained_subgraph(
                seed, machine_graph, free_verts, machine, placements,
                tracker, progress, same_chip_groups)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
Example #37
0
    def __load_images(self, executable_targets, app_id, txrx, filt, label):
        """ Flood-fill the binaries selected by *filt* and start their cores.
        """
        # Compute what work is to be done here
        binaries, cores = self.__filter(executable_targets, filt)

        # ISSUE: Loading order may be non-constant on older Python
        bar = ProgressBar(cores.total_processors + 1, label)
        for binary in binaries:
            loaded = flood_fill_binary_to_spinnaker(
                executable_targets, binary, txrx, app_id)
            bar.update(loaded)

        # Final tick accounts for starting the simulation
        self.__start_simulation(cores, txrx, app_id)
        bar.update()
        bar.end()
    def __call__(self, machine_graph, placements, buffer_manager):
        """ Extract recorded buffer data from the machine after a run.

        :param machine_graph: the machine graph whose regions to read
        :param placements: where the recording vertices live
        :param buffer_manager: does the actual data extraction
        """
        # Figure out how many regions need reading back
        n_regions, read_placements = self._count_regions(
            machine_graph, placements)

        progress = ProgressBar(
            n_regions, "Extracting buffers from the last run")
        # Always close the bar, even if extraction raises
        try:
            buffer_manager.get_data_for_placements(read_placements, progress)
        finally:
            progress.end()
    def read_samples(self, buffer_manager):
        """ Read back the samples
        """
        bar = ProgressBar(len(self._mcmc_placements), "Reading results")
        collected = list()
        for placement in self._mcmc_placements:
            # Read the data recorded; vertices may legitimately return None
            data = placement.vertex.read_samples(buffer_manager, placement)
            if data is not None:
                collected.append(data)
            bar.update()
        bar.end()

        # Merge all the arrays
        return numpy.hstack(collected)
    def __call__(self, app_id, txrx, executable_targets, has_ran):
        """ Wait for every core of the application to reach FINISHED.

        :param app_id: the application ID being shut down
        :param txrx: the transceiver used to query core states
        :param executable_targets: the cores running the application
        :param has_ran: whether the simulation has actually run
        :raises ConfigurationException: if the simulation never ran
        :raises ExecutableFailedToStopException:
            if any core ends in an error state
        """
        if not has_ran:
            raise exceptions.ConfigurationException(
                "The ran token is not set correctly, please fix and try again")

        total_processors = executable_targets.total_processors
        all_core_subsets = executable_targets.all_core_subsets

        progress = ProgressBar(
            total_processors,
            "Turning off all the cores within the simulation")

        # check that the right number of processors are finished
        processors_finished = txrx.get_core_state_count(
            app_id, CPUState.FINISHED)
        finished_cores = processors_finished

        while processors_finished != total_processors:
            if processors_finished > finished_cores:
                # BUG FIX: operands were reversed, so a negative delta was
                # passed to the progress bar; sibling implementations use
                # (processors_finished - finished_cores)
                progress.update(processors_finished - finished_cores)
                finished_cores = processors_finished

            processors_rte = txrx.get_core_state_count(
                app_id, CPUState.RUN_TIME_EXCEPTION)
            processors_watchdogged = txrx.get_core_state_count(
                app_id, CPUState.WATCHDOG)

            # any core in an error state aborts the wait
            if processors_rte > 0 or processors_watchdogged > 0:
                raise exceptions.ExecutableFailedToStopException(
                    "{} of {} processors went into an error state when"
                    " shutting down".format(
                        processors_rte + processors_watchdogged,
                        total_processors))

            successful_cores_finished = txrx.get_cores_in_state(
                all_core_subsets, CPUState.FINISHED)

            # gather provenance from any core not yet finished
            for core_subset in all_core_subsets:
                for processor in core_subset.processor_ids:
                    if not successful_cores_finished.is_core(
                            core_subset.x, core_subset.y, processor):
                        self._update_provenance_and_exit(
                            txrx, processor, core_subset)

            # NOTE(review): unlike the sibling implementations, this loop has
            # no sleep between polls and so busy-waits — confirm intended
            processors_finished = txrx.get_core_state_count(
                app_id, CPUState.FINISHED)

        progress.end()
    def __call__(self, app_id, txrx, executable_types):
        """ Wait for every simulation-interface core to reach FINISHED.

        :param int app_id: the application ID being shut down
        :param ~spinnman.transceiver.Transceiver txrx:
            the transceiver used to query core states
        :param dict(ExecutableType,~spinn_machine.CoreSubsets) \
                executable_types: the cores grouped by executable type
        :raises ExecutableFailedToStopException:
            if any core ends in an error state
        """
        # only cores using the simulation interface need waiting for
        total_processors = \
            len(executable_types[ExecutableType.USES_SIMULATION_INTERFACE])
        all_core_subsets = \
            executable_types[ExecutableType.USES_SIMULATION_INTERFACE]

        progress = ProgressBar(
            total_processors,
            "Turning off all the cores within the simulation")

        # check that the right number of processors are finished
        processors_finished = txrx.get_core_state_count(
            app_id, CPUState.FINISHED)
        finished_cores = processors_finished

        while processors_finished != total_processors:
            # only tick the bar for newly-finished cores
            if processors_finished > finished_cores:
                progress.update(processors_finished - finished_cores)
                finished_cores = processors_finished

            processors_rte = txrx.get_core_state_count(
                app_id, CPUState.RUN_TIME_EXCEPTION)
            processors_watchdogged = txrx.get_core_state_count(
                app_id, CPUState.WATCHDOG)

            # any core in an error state aborts the wait
            if processors_rte > 0 or processors_watchdogged > 0:
                raise ExecutableFailedToStopException(
                    "{} of {} processors went into an error state when"
                    " shutting down".format(
                        processors_rte + processors_watchdogged,
                        total_processors))

            successful_cores_finished = txrx.get_cores_in_state(
                all_core_subsets, CPUState.FINISHED)

            # gather provenance from any core that has not yet finished
            for core_subset in all_core_subsets:
                for processor in core_subset.processor_ids:
                    if not successful_cores_finished.is_core(
                            core_subset.x, core_subset.y, processor):
                        self._update_provenance_and_exit(
                            txrx, processor, core_subset)
            time.sleep(0.5)

            processors_finished = txrx.get_core_state_count(
                app_id, CPUState.FINISHED)

        progress.end()
    def __call__(self, report_folder, application_graph):
        """ Render the application graph as a Graphviz diagram and PDF.

        :param report_folder: the report folder to put figure into
        :param application_graph: the app graph
        :rtype: None
        """
        vertex_ids = dict()
        diagram = self._get_diagram(
            "The graph of the network in graphical form")

        # one unit of progress per vertex, per partition, plus the render
        progress = ProgressBar(
            application_graph.n_vertices +
            application_graph.n_outgoing_edge_partitions + 1,
            "generating the graphical representation of the neural network")

        # add one node per application vertex
        for counter, vertex in progress.over(
                enumerate(application_graph.vertices), False):
            diagram.node(
                "{}".format(counter),
                "{} ({} neurons)".format(vertex.label, vertex.n_atoms))
            vertex_ids[vertex] = counter

        # add one edge per application edge; projections get connector labels
        for partition in progress.over(
                application_graph.outgoing_edge_partitions, False):
            for edge in partition.edges:
                src = vertex_ids[edge.pre_vertex]
                dst = vertex_ids[edge.post_vertex]
                if isinstance(edge, ProjectionApplicationEdge):
                    for synapse_info in edge.synapse_information:
                        diagram.edge(
                            "{}".format(src),
                            "{}".format(dst),
                            "{}".format(synapse_info.connector))
                else:
                    diagram.edge(
                        "{}".format(src),
                        "{}".format(dst))

        # write the .gv file and render it without opening a viewer
        output_path = os.path.join(report_folder, "network_graph.gv")
        diagram.render(output_path, view=False)
        progress.update()
        progress.end()
    def __call__(self, report_folder, application_graph):
        """ Draw the network as a Graphviz dot diagram and render a PDF.

        :param report_folder: the report folder to put figure into
        :param application_graph: the app graph
        :rtype: None
        """
        id_of_vertex = dict()
        graph_plot = self._get_diagram(
            "The graph of the network in graphical form")

        # progress covers every vertex, every partition, and the final render
        bar = ProgressBar(
            application_graph.n_vertices +
            application_graph.n_outgoing_edge_partitions + 1,
            "generating the graphical representation of the neural network")

        # one graph node per application vertex, keyed by insertion order
        for index, app_vertex in bar.over(
                enumerate(application_graph.vertices), False):
            graph_plot.node(
                "{}".format(index),
                "{} ({} neurons)".format(
                    app_vertex.label, app_vertex.n_atoms))
            id_of_vertex[app_vertex] = index

        # one graph edge per application edge; label projection connectors
        for partition in bar.over(
                application_graph.outgoing_edge_partitions, False):
            for edge in partition.edges:
                pre_id = id_of_vertex[edge.pre_vertex]
                post_id = id_of_vertex[edge.post_vertex]
                if not isinstance(edge, ProjectionApplicationEdge):
                    graph_plot.edge(
                        "{}".format(pre_id),
                        "{}".format(post_id))
                else:
                    for synapse_info in edge.synapse_information:
                        graph_plot.edge(
                            "{}".format(pre_id),
                            "{}".format(post_id),
                            "{}".format(synapse_info.connector))

        # write dot file and generate pdf, without launching a viewer
        target = os.path.join(report_folder, "network_graph.gv")
        graph_plot.render(target, view=False)
        bar.update()
        bar.end()
Example #44
0
    def __call__(
            self, machine_graph, application_graph=None,
            provenance_data_objects=None):
        """ Gather locally-held provenance from graph vertices and edges.

        :param machine_graph: The machine graph to inspect
        :param application_graph: The optional application graph
        :param provenance_data_objects: Any existing objects to append to
        """
        # Append to the caller's list if one was supplied
        if provenance_data_objects is None:
            prov_items = list()
        else:
            prov_items = provenance_data_objects

        progress = ProgressBar(
            machine_graph.n_vertices +
            machine_graph.n_outgoing_edge_partitions,
            "Getting provenance data from machine graph")
        # Only providers of local provenance contribute items
        for vertex in progress.over(machine_graph.vertices, False):
            if isinstance(vertex, AbstractProvidesLocalProvenanceData):
                prov_items.extend(vertex.get_local_provenance_data())
        for partition in progress.over(
                machine_graph.outgoing_edge_partitions, False):
            for edge in partition.edges:
                if isinstance(edge, AbstractProvidesLocalProvenanceData):
                    prov_items.extend(edge.get_local_provenance_data())
        progress.end()

        # Repeat for the application graph, when present
        if application_graph is not None:
            progress = ProgressBar(
                application_graph.n_vertices +
                application_graph.n_outgoing_edge_partitions,
                "Getting provenance data from application graph")
            for vertex in progress.over(application_graph.vertices, False):
                if isinstance(vertex, AbstractProvidesLocalProvenanceData):
                    prov_items.extend(vertex.get_local_provenance_data())
            for partition in progress.over(
                    application_graph.outgoing_edge_partitions, False):
                for edge in partition.edges:
                    if isinstance(edge, AbstractProvidesLocalProvenanceData):
                        prov_items.extend(edge.get_local_provenance_data())
            progress.end()

        return prov_items
    def __call__(self, machine, file_path):
        """ Convert a machine description into a machine.json file.

        :param machine: the machine to describe
        :param file_path: where to write the JSON
        :return: the path of the file written
        """
        width = machine.max_chip_x + 1
        height = machine.max_chip_y + 1
        progress = ProgressBar(
            width * height + 2, "Converting to JSON machine")

        # uniform per-chip resources; deviations are listed as exceptions
        json_obj = {
            "width": width,
            "height": height,
            "chip_resources": {
                "cores": CHIP_HOMOGENEOUS_CORES,
                "sdram": CHIP_HOMOGENEOUS_SDRAM,
                "sram": CHIP_HOMOGENEOUS_SRAM,
                "router_entries": ROUTER_HOMOGENEOUS_ENTRIES,
                "tags": CHIP_HOMOGENEOUS_TAGS},
            "dead_chips": [],
            "dead_links": []}

        # handle exceptions (dead chips)
        exceptions = defaultdict(dict)
        for x in range(width):
            for y in progress.over(range(height), False):
                self._add_exceptions(json_obj, machine, x, y, exceptions)
        json_obj["chip_resource_exceptions"] = [
            [x, y, exceptions[x, y]] for x, y in exceptions]
        progress.update()

        # dump to json file
        with open(file_path, "w") as out:
            json.dump(json_obj, out)
        progress.update()

        # validate the schema
        file_format_schemas.validate(json_obj, "machine.json")

        # update and complete progress bar
        progress.end()

        return file_path
    def __call__(self, machine_graph, placements, machine,
                 vertex_to_ethernet_connected_chip_mapping,
                 application_graph=None, graph_mapper=None):
        """ Insert edges to vertices that need the FR speed-up functionality.

        :param machine_graph: the machine graph instance
        :param placements: the placements
        :param machine: the machine object
        :param vertex_to_ethernet_connected_chip_mapping: \
            mapping between ethernet connected chips and packet gatherers
        :param application_graph: the application graph, if there is one
        :param graph_mapper: the graph mapper, if there is one
        :rtype: None
        """
        # pylint: disable=too-many-arguments
        app_vertex_count = 0
        if application_graph is not None:
            app_vertex_count = application_graph.n_vertices

        progress = ProgressBar(
            machine_graph.n_vertices + app_vertex_count,
            "Inserting edges between vertices which require FR speed up "
            "functionality.")

        # machine-level monitor vertices are wired directly
        for m_vertex in progress.over(machine_graph.vertices, False):
            if isinstance(m_vertex, ExtraMonitorSupportMachineVertex):
                self._process_vertex(
                    m_vertex, machine, placements, machine_graph,
                    vertex_to_ethernet_connected_chip_mapping,
                    application_graph, graph_mapper)

        # application-level monitors are wired via their machine vertices
        if application_graph is not None:
            for app_vertex in progress.over(
                    application_graph.vertices, False):
                if isinstance(app_vertex, ExtraMonitorSupport):
                    for m_vertex in graph_mapper.get_machine_vertices(
                            app_vertex):
                        self._process_vertex(
                            m_vertex, machine, placements, machine_graph,
                            vertex_to_ethernet_connected_chip_mapping,
                            application_graph, graph_mapper)
        progress.end()
class UpdateRuntimeProcess(AbstractMultiConnectionProcess):
    """ Broadcast a new run time to a set of cores, with progress feedback.
    """

    def __init__(self, connection_selector):
        super(UpdateRuntimeProcess, self).__init__(connection_selector)
        # Created lazily by update_runtime(); None until then so that
        # stray responses before the bar exists are ignored.
        self._progress = None

    def receive_response(self, response):  # @UnusedVariable
        # One tick per core that acknowledges the runtime update.
        if self._progress is not None:
            self._progress.update()

    def update_runtime(self, run_time, infinite_run, core_subsets, n_cores):
        """ Send an SCP runtime-update request to every processor listed.

        :param run_time: the new run duration
        :param infinite_run: whether the run should be unbounded
        :param core_subsets: the cores to update
        :param n_cores: total processor count (sizes the progress bar)
        """
        self._progress = ProgressBar(n_cores, "Updating run time")
        for subset in core_subsets:
            for proc_id in subset.processor_ids:
                request = SCPUpdateRuntimeRequest(
                    subset.x, subset.y, proc_id, run_time, infinite_run,
                    SDP_PORTS.RUNNING_COMMAND_SDP_PORT.value)
                self._send_request(request, callback=self.receive_response)
        self._finish()
        self._progress.end()
        self.check_for_error()
class ClearIOBUFProcess(AbstractMultiConnectionProcess):
    """ Clear the IOBUF buffers on a set of cores, with progress feedback.
    """

    def __init__(self, connection_selector):
        super(ClearIOBUFProcess, self).__init__(connection_selector)
        # Created lazily by clear_iobuf(); None until then so that stray
        # responses before the bar exists are ignored.
        self._progress = None

    def receive_response(self, response):  # @UnusedVariable
        # One tick per core that acknowledges the clear request.
        if self._progress is not None:
            self._progress.update()

    def clear_iobuf(self, core_subsets, n_cores):
        """ Send an SCP clear-IOBUF request to every processor listed.

        :param core_subsets: the cores whose IOBUF should be cleared
        :param n_cores: total processor count (sizes the progress bar)
        """
        self._progress = ProgressBar(
            n_cores, "clearing IOBUF from the machine")
        for subset in core_subsets:
            for proc_id in subset.processor_ids:
                request = SCPClearIOBUFRequest(
                    subset.x, subset.y, proc_id,
                    SDP_PORTS.RUNNING_COMMAND_SDP_PORT.value)
                self._send_request(request, callback=self.receive_response)
        self._finish()
        self._progress.end()
        self.check_for_error()
    def call(self, inputs):
        """ Run the external algorithm as a subprocess.

        :param inputs: mapping of input names to values; used to format\
            the command-line arguments and to resolve the outputs
        :return: the outputs mapping (the real results are expected to be\
            read from files named in ``inputs``, so placeholders are used)
        :raises PacmanExternalAlgorithmFailedToCompleteException: \
            if the external process exits with a non-zero return code
        """
        # Get the inputs to pass as the arguments
        arg_inputs = self._get_inputs(inputs)

        # Convert the arguments using the inputs
        args = [
            arg.format(**arg_inputs) for arg in self._command_line_arguments
        ]

        algorithm_progress_bar = ProgressBar(
            1, "Running external algorithm {}".format(self._algorithm_id))

        # Run the external command. Use communicate() rather than wait():
        # with stdout/stderr redirected to PIPE, wait() can deadlock if the
        # child fills a pipe buffer before exiting; communicate() drains
        # both pipes while waiting and captures them for error reporting.
        child = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            stdin=subprocess.PIPE)
        stdout, stderr = child.communicate()

        algorithm_progress_bar.update(1)
        algorithm_progress_bar.end()

        # Detect any errors
        if child.returncode != 0:
            raise PacmanExternalAlgorithmFailedToCompleteException(
                    "Algorithm {} returned a non-zero error code {}\n"
                    "    Inputs: {}\n"
                    "    Output: {}\n"
                    "    Error: {}\n".format(
                        self._algorithm_id, child.returncode,
                        inputs.keys(), stdout, stderr))

        # Return the results processed into a dict
        # Use None here as the results don't actually exist, and are expected
        # to be obtained from a file, whose name is in inputs
        return self._get_outputs(inputs, [None] * len(self._outputs))
    def __call__(self, machine_graph, plan_n_timesteps, file_path):
        """ Convert a machine graph into the JSON wire format and write it\
            to disk, validating it against the schema.

        :param machine_graph: The graph to convert
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :param file_path: Where to write the JSON
        :return: the file path written, plus the id-to-vertex and\
            id-to-partition lookup tables built during conversion
        """
        progress = ProgressBar(
            machine_graph.n_vertices + 1, "Converting to JSON graph")

        # write basic stuff
        json_graph = dict()

        # write vertices data
        vertices_resources = dict()
        json_graph["vertices_resources"] = vertices_resources

        # Was defaultdict() with no default_factory, which behaves exactly
        # like a plain dict (raises KeyError on missing keys) but misleads
        # readers into expecting auto-created values; use dict() as for
        # vertices_resources above.
        edges_resources = dict()
        json_graph["edges"] = edges_resources

        vertex_by_id = dict()
        partition_by_id = dict()
        for vertex in progress.over(machine_graph.vertices, False):
            self._convert_vertex(
                vertex, vertex_by_id, vertices_resources, edges_resources,
                machine_graph, plan_n_timesteps, partition_by_id)

        with open(file_path, "w") as f:
            json.dump(json_graph, f)
        progress.update()

        # check the emitted structure against the schema before returning
        file_format_schemas.validate(json_graph, "machine_graph.json")

        progress.end()

        return file_path, vertex_by_id, partition_by_id
    def __call__(self, application_graph, graph_mapper, machine_graph,
                 n_keys_map):
        """ Allocate routing keys by carving up a 32-bit bit-field space\
            according to the constraint types found on the edge partitions.

        :param application_graph: The application graph
        :param graph_mapper: the mapping between graphs
        :param machine_graph: the machine graph
        :param n_keys_map: the mapping between edges and n keys
        :return: routing information objects, plus the set of field\
            positions used within the key space
        """
        # 3 progress units per partition: grouping, type deduction, and
        # the final key/mask extraction loop below
        progress_bar = ProgressBar(
            machine_graph.n_outgoing_edge_partitions * 3,
            "Allocating routing keys")

        # ensure groups are stable and correct
        self._determine_groups(
            machine_graph, graph_mapper, application_graph, n_keys_map,
            progress_bar)

        # define the key space (32 bits wide)
        bit_field_space = BitField(32)
        field_positions = set()

        # locate however many types of constraints there are
        seen_fields = deduce_types(machine_graph)
        progress_bar.update(machine_graph.n_outgoing_edge_partitions)

        # with more than one field type, an extra application-level field
        # is added so the types can be told apart within the key space
        if len(seen_fields) > 1:
            self._adds_application_field_to_the_fields(seen_fields)

        # handle the application space
        self._create_application_space_in_the_bit_field_space(
            bit_field_space, seen_fields, field_positions)

        # assign fields to positions in the space
        bit_field_space.assign_fields()

        # get positions of the flexible fields:
        self._assign_flexi_field_positions(
            bit_field_space, seen_fields, field_positions)

        # create routing_info_allocator
        routing_info = RoutingInfo()
        # running count threaded through the extraction helper so that each
        # partition continues where the previous one left off
        seen_mask_instances = 0

        # extract keys and masks for each edge from the bitfield
        for partition in machine_graph.outgoing_edge_partitions:
            # get keys and masks
            keys_and_masks, seen_mask_instances = \
                self._extract_keys_and_masks_from_bit_field(
                    partition, bit_field_space, n_keys_map,
                    seen_mask_instances)

            # update routing info for each edge in the partition
            partition_info = PartitionRoutingInfo(keys_and_masks, partition)
            routing_info.add_partition_info(partition_info)

            # update the progress bar again
            progress_bar.update()
        progress_bar.end()

        return routing_info, field_positions
    def __call__(self, machine_graph, n_keys_map, routing_tables):
        """ Allocate routing keys: fixed-key, fixed-mask and fixed-field\
            groups first, then the remaining ("continuous") groups ordered\
            so that partitions sharing routes in the busiest routers are\
            keyed together.

        :param machine_graph: the machine graph whose partitions need keys
        :param n_keys_map: the mapping between partitions and n keys
        :param routing_tables: routing tables used to group partitions\
            that share routes
        :return: the allocated routing information
        """
        # check that this algorithm supports the constraints
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint,
                FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # verify that no edge has more than 1 of a constraint ,and that
        # constraints are compatible
        check_types_of_edge_constraint(machine_graph)

        routing_infos = RoutingInfo()

        # Get the edges grouped by those that require the same key
        (fixed_keys, _shared_keys, fixed_masks, fixed_fields, flexi_fields,
         continuous, noncontinuous) = \
            get_edge_groups(machine_graph, EdgeTrafficType.MULTICAST)
        if flexi_fields:
            raise PacmanConfigurationException(
                "MallocBasedRoutingInfoAllocator does not support FlexiField")

        # Even non-continuous keys will be continuous
        for group in noncontinuous:
            continuous.add(group)

        # Go through the groups and allocate keys
        progress = ProgressBar(
            machine_graph.n_outgoing_edge_partitions,
            "Allocating routing keys")

        # allocate the groups that have fixed keys
        for group in progress.over(fixed_keys, False):
            # Get any fixed keys and masks from the group and attempt to
            # allocate them
            fixed_mask = None
            fixed_key_and_mask_constraint = locate_constraints_of_type(
                group.constraints, FixedKeyAndMaskConstraint)[0]

            # attempt to allocate them
            self._allocate_fixed_keys_and_masks(
                fixed_key_and_mask_constraint.keys_and_masks, fixed_mask)

            # update the pacman data objects
            self._update_routing_objects(
                fixed_key_and_mask_constraint.keys_and_masks, routing_infos,
                group)
            # fixed-key groups are fully handled; exclude from the final pass
            continuous.remove(group)

        for group in progress.over(fixed_masks, False):
            # get mask and fields if need be
            fixed_mask = locate_constraints_of_type(
                group.constraints, FixedMaskConstraint)[0].mask

            fields = None
            if group in fixed_fields:
                fields = locate_constraints_of_type(
                    group.constraints, FixedKeyFieldConstraint)[0].fields
                # consumed here, so the fixed-fields pass below skips it
                fixed_fields.remove(group)

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                fixed_mask, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        for group in progress.over(fixed_fields, False):
            fields = locate_constraints_of_type(
                group.constraints, FixedKeyFieldConstraint)[0].fields

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                None, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        # Sort the rest of the groups, using the routing tables for guidance
        # Group partitions by those which share routes in any table
        partition_groups = OrderedDict()
        # visit the routers with the most entries first
        routers = reversed(sorted(
            routing_tables.get_routers(),
            key=lambda item: len(routing_tables.get_entries_for_router(
                item[0], item[1]))))
        for x, y in routers:

            # Find all partitions that share a route in this table
            partitions_by_route = defaultdict(OrderedSet)
            routing_table = routing_tables.get_entries_for_router(x, y)
            for partition, entry in iteritems(routing_table):
                if partition in continuous:
                    # hash the route: bits 0-5 are links, bits 6+ processors
                    entry_hash = sum(
                        1 << i
                        for i in entry.link_ids)
                    entry_hash += sum(
                        1 << (i + 6)
                        for i in entry.processor_ids)
                    partitions_by_route[entry_hash].add(partition)

            for entry_hash, partitions in iteritems(partitions_by_route):
                # collect the existing groups any of these partitions are in
                found_groups = list()
                for partition in partitions:
                    if partition in partition_groups:
                        found_groups.append(partition_groups[partition])

                if not found_groups:
                    # If no group was found, create a new one
                    for partition in partitions:
                        partition_groups[partition] = partitions

                elif len(found_groups) == 1:
                    # If a single other group was found, merge it
                    for partition in partitions:
                        found_groups[0].add(partition)
                        partition_groups[partition] = found_groups[0]

                else:
                    # Merge the groups
                    new_group = partitions
                    for group in found_groups:
                        for partition in group:
                            new_group.add(partition)
                    for partition in new_group:
                        partition_groups[partition] = new_group

        # Sort partitions by largest group
        continuous = list(OrderedSet(
            tuple(group) for group in itervalues(partition_groups)))

        # allocate the largest groups first
        for group in reversed(sorted(continuous, key=len)):
            for partition in progress.over(group, False):
                keys_and_masks = self._allocate_keys_and_masks(
                    None, None, n_keys_map.n_keys_for_partition(partition))

                # update the pacman data objects
                self._update_routing_objects(
                    keys_and_masks, routing_infos, partition)

        progress.end()
        return routing_infos
    def __call__(
            self, graph, machine, plan_n_timesteps,
            preallocated_resources=None):
        """ Partition an application graph onto a machine, one vertex at a\
            time, keeping same-size vertex groups in step.

        :param graph: The application_graph to partition
        :type graph:\
            :py:class:`pacman.model.graph.application.ApplicationGraph`
        :param machine: The machine with respect to which to partition the\
            application_graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :return: \
            A machine_graph of partitioned vertices and partitioned edges
        :rtype:\
            :py:class:`pacman.model.graph.machine.MachineGraph`
        :raise pacman.exceptions.PacmanPartitionException: \
            If something goes wrong with the partitioning
        """
        ResourceTracker.check_constraints(graph.vertices)
        utils.check_algorithm_can_support_constraints(
            constrained_vertices=graph.vertices,
            abstract_constraint_type=AbstractPartitionerConstraint,
            supported_constraints=[MaxVertexAtomsConstraint,
                                   SameAtomsAsVertexConstraint,
                                   FixedVertexAtomsConstraint])

        # Load the vertices and create the machine_graph to fill
        machine_graph = MachineGraph(
            label="partitioned graph for {}".format(graph.label))
        graph_mapper = GraphMapper()

        # sort out vertex's by placement constraints
        vertices = sort_vertices_by_known_constraints(graph.vertices)

        # The progress bar ticks per atom rather than per vertex
        progress = ProgressBar(
            sum(vertex.n_atoms for vertex in vertices),
            "Partitioning graph vertices")

        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps,
            preallocated_resources=preallocated_resources)

        # Group vertices that are supposed to be the same size
        vertex_groups = get_same_size_vertex_groups(vertices)

        # Partition one vertex at a time, skipping any vertex that was
        # already partitioned as part of an earlier vertex's group
        for vertex in vertices:
            if graph_mapper.get_machine_vertices(vertex) is None:
                self._partition_vertex(
                    vertex, plan_n_timesteps, machine_graph, graph_mapper,
                    resource_tracker, progress, vertex_groups)
        progress.end()

        generate_machine_edges(machine_graph, graph_mapper, graph)

        return machine_graph, graph_mapper, resource_tracker.chips_used
    def __call__(
            self, routing_tables, transceiver, machine, app_id,
            provenance_file_path, compress_only_when_needed=True,
            compress_as_much_as_possible=False):
        """
        :param routing_tables: the memory routing tables to be compressed
        :param transceiver: the spinnman interface
        :param machine: the SpiNNaker machine representation
        :param app_id: the application ID used by the main application
        :param provenance_file_path: the path to where to write the data
        :return: flag stating routing compression and loading has been done
        """
        # pylint: disable=too-many-arguments

        # build progress bar
        progress = ProgressBar(
            len(routing_tables.routing_tables) + 2,
            "Running routing table compression on chip")
        compressor_app_id = transceiver.app_id_tracker.get_new_id()

        # figure size of SDRAM needed for each chip for storing the routing
        # table
        for routing_table in progress.over(routing_tables, False):
            self._load_routing_table(
                routing_table, transceiver, app_id, compressor_app_id,
                compress_only_when_needed, compress_as_much_as_possible)

        # load the router compressor executable
        executable_targets = self._load_executables(
            routing_tables, compressor_app_id, transceiver, machine)

        # update progress bar
        progress.update()

        # Wait for the executable to finish
        succeeded = False
        try:
            transceiver.wait_for_cores_to_be_in_state(
                executable_targets.all_core_subsets, compressor_app_id,
                [CPUState.FINISHED])
            succeeded = True
        finally:
            # get the debug data
            if not succeeded:
                self._handle_failure(
                    executable_targets, transceiver, provenance_file_path,
                    compressor_app_id)

        # Check if any cores have not completed successfully
        self._check_for_success(
            executable_targets, transceiver,
            provenance_file_path, compressor_app_id)

        # update progress bar
        progress.update()

        # stop anything that's associated with the compressor binary
        transceiver.stop_application(compressor_app_id)
        transceiver.app_id_tracker.free_id(compressor_app_id)

        # update the progress bar
        progress.end()