def __call__(self, machine_graph, graph_mapper):
        """
        :param machine_graph: the machine_graph whose edges are to be filtered
        :param graph_mapper: the graph mapper between graphs
        :return: a new graph mapper and machine graph
        """
        new_machine_graph = MachineGraph(label=machine_graph.label)
        new_graph_mapper = GraphMapper()

        # create progress bar
        progress = ProgressBar(
            machine_graph.n_vertices +
            machine_graph.n_outgoing_edge_partitions,
            "Filtering edges")

        # add the vertices directly, as they won't be pruned.
        for vertex in progress.over(machine_graph.vertices, False):
            self._add_vertex_to_new_graph(
                vertex, graph_mapper, new_machine_graph, new_graph_mapper)

        # start checking edges to decide which ones need pruning....
        for partition in progress.over(machine_graph.outgoing_edge_partitions):
            for edge in partition.edges:
                if self._is_filterable(edge, graph_mapper):
                    logger.debug("this edge was pruned %s", edge)
                    continue
                logger.debug("this edge was not pruned %s", edge)
                self._add_edge_to_new_graph(
                    edge, partition, graph_mapper, new_machine_graph,
                    new_graph_mapper)

        # return the pruned graph and graph mapper
        return new_machine_graph, new_graph_mapper
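# A minimal sketch of the ProgressBar idiom used throughout these passes:
# the bar is sized by a count of work items, over() wraps each loop, passing
# finish_at_end=False keeps the bar open when further loops follow, and the
# final over() (or an explicit end()) closes it. The import path is an
# assumption based on the usual SpiNNaker utilities layout; the work items
# below are placeholders.
from spinn_utilities.progress_bar import ProgressBar

vertices = ["v0", "v1", "v2"]     # placeholder vertices
partitions = ["p0", "p1"]         # placeholder edge partitions

progress = ProgressBar(
    len(vertices) + len(partitions), "Filtering edges (sketch)")
for vertex in progress.over(vertices, finish_at_end=False):
    pass                          # per-vertex work would go here
for partition in progress.over(partitions):
    pass                          # per-partition work; the bar ends here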
    def __call__(self, machine_graph, n_keys_map, graph_mapper=None):
        # check that this algorithm supports the constraints
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint,
                FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint, ShareKeyConstraint],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # verify that no edge has more than one of each constraint type, and
        # that the constraints are compatible
        check_types_of_edge_constraint(machine_graph)

        # final key allocations
        routing_infos = RoutingInfo()

        # Get the edges grouped by those that require the same key
        (fixed_keys, shared_keys, fixed_masks, fixed_fields, flexi_fields,
         continuous, noncontinuous) = get_edge_groups(
             machine_graph, EdgeTrafficType.MULTICAST)

        # Go through the groups and allocate keys
        progress = ProgressBar(
            machine_graph.n_outgoing_edge_partitions,
            "Allocating routing keys")

        # allocate the groups that have fixed keys
        for group in progress.over(fixed_keys, False):
            self._allocate_fixed_keys(group, routing_infos)

        for group in progress.over(fixed_masks, False):
            self._allocate_fixed_masks(group, n_keys_map, routing_infos)

        for group in progress.over(fixed_fields, False):
            self._allocate_fixed_fields(group, n_keys_map, routing_infos)

        if flexi_fields:
            raise PacmanConfigurationException(
                "MallocBasedRoutingInfoAllocator does not support FlexiField")

        for group in progress.over(shared_keys, False):
            self._allocate_share_key(group, routing_infos, n_keys_map)

        for group in continuous:
            self._allocate_other_groups(group, routing_infos, n_keys_map,
                                        continuous=True)

        for group in noncontinuous:
            self._allocate_other_groups(group, routing_infos, n_keys_map,
                                        continuous=False)

        progress.end()
        return routing_infos
    def __call__(self, report_folder, application_graph):
        """
        :param report_folder: the report folder to put the figure into
        :param application_graph: the application graph
        :rtype: None
        """

        # create holders for data
        vertex_holders = dict()
        dot_diagram = self._get_diagram(
            "The graph of the network in graphical form")

        # build progress bar for the vertices, edges, and rendering
        progress = ProgressBar(
            application_graph.n_vertices +
            application_graph.n_outgoing_edge_partitions + 1,
            "generating the graphical representation of the neural network")

        # write vertices into dot diagram
        for vertex_counter, vertex in progress.over(
                enumerate(application_graph.vertices), False):
            dot_diagram.node(
                "{}".format(vertex_counter),
                "{} ({} neurons)".format(vertex.label, vertex.n_atoms))
            vertex_holders[vertex] = vertex_counter

        # write edges into dot diagram
        for partition in progress.over(
                application_graph.outgoing_edge_partitions, False):
            for edge in partition.edges:
                source_vertex_id = vertex_holders[edge.pre_vertex]
                dest_vertex_id = vertex_holders[edge.post_vertex]
                if isinstance(edge, ProjectionApplicationEdge):
                    for synapse_info in edge.synapse_information:
                        dot_diagram.edge(
                            "{}".format(source_vertex_id),
                            "{}".format(dest_vertex_id),
                            "{}".format(synapse_info.connector))
                else:
                    dot_diagram.edge(
                        "{}".format(source_vertex_id),
                        "{}".format(dest_vertex_id))

        # write dot file and generate pdf
        file_to_output = os.path.join(report_folder, "network_graph.gv")
        dot_diagram.render(file_to_output, view=False)
        progress.update()
        progress.end()
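# The node()/edge()/render() calls above match the Python graphviz package's
# Digraph API; a minimal standalone sketch under that assumption, using
# placeholder population names and a placeholder connector label:
from graphviz import Digraph

dot_diagram = Digraph(comment="The graph of the network in graphical form")
dot_diagram.node("0", "pop_input (100 neurons)")
dot_diagram.node("1", "pop_output (100 neurons)")
dot_diagram.edge("0", "1", "OneToOneConnector")
# dot_diagram.render("network_graph.gv", view=False)  # writes .gv plus a PDF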
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """ Place each vertex in a machine graph on a core in the machine.

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine: A SpiNNaker machine object.
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type plan_n_timesteps: int
        :return: Placements of vertices on the machine
        :rtype: :py:class:`pacman.model.placements.Placements`
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(machine_graph.n_vertices,
                               "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_random_chips(machine))
        vertices_on_same_chip = get_same_chip_vertex_groups(machine_graph)
        vertices_placed = set()
        for vertex in progress.over(vertices):
            if vertex not in vertices_placed:
                vertices_placed.update(self._place_vertex(
                    vertex, resource_tracker, machine, placements,
                    vertices_on_same_chip))
        return placements
    def __call__(self, report_folder, connection_holder, dsg_targets):
        """ Convert synaptic matrix for every application edge.
        """

        # Update the print options to display everything
        print_opts = numpy.get_printoptions()
        numpy.set_printoptions(threshold=numpy.nan)

        if dsg_targets is None:
            raise SynapticConfigurationException(
                "dsg_targets should not be None; it is used as a check that "
                "connection holder data has been generated")

        # generate folder for synaptic reports
        top_level_folder = os.path.join(report_folder, _DIRNAME)
        if not os.path.exists(top_level_folder):
            os.mkdir(top_level_folder)

        # create progress bar
        progress = ProgressBar(connection_holder.keys(),
                               "Generating synaptic matrix reports")

        # for each application edge, write matrix in new file
        for edge, _ in progress.over(connection_holder.keys()):
            # only write matrices for edges which have matrices
            if isinstance(edge, ProjectionApplicationEdge):
                # figure new file name
                file_name = os.path.join(
                    top_level_folder, _TMPL_FILENAME.format(edge.label))
                self._write_file(file_name, connection_holder, edge)

        # Reset the print options
        numpy.set_printoptions(**print_opts)
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """ Place a machine_graph so that each vertex is placed on a core

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type plan_n_timesteps: int
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: \
            If something goes wrong with the placement
        """

        # check that the algorithm can handle the constraints
        ResourceTracker.check_constraints(machine_graph.vertices)

        placements = Placements()
        vertices = sort_vertices_by_known_constraints(machine_graph.vertices)

        # Iterate over vertices and generate placements
        progress = ProgressBar(vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(machine, plan_n_timesteps)
        for vertex in progress.over(vertices):
            # Create and store a new placement anywhere on the board
            (x, y, p, _, _) = resource_tracker.allocate_constrained_resources(
                vertex.resources_required, vertex.constraints, None)
            placement = Placement(vertex, x, y, p)
            placements.add_placement(placement)
        return placements
    def __call__(
            self, transceiver, placements, provenance_file_path,
            run_time_ms, machine_time_step):
        """
        :param transceiver: the SpiNNMan interface object
        :param placements: The placements of the vertices
        :param provenance_file_path: The location to store the profile data
        :param run_time_ms: runtime in ms
        :param machine_time_step: the machine time step in microseconds
        """
        # pylint: disable=too-many-arguments
        machine_time_step_ms = machine_time_step // 1000

        progress = ProgressBar(
            placements.n_placements, "Getting profile data")

        # retrieve provenance data from any cores that provide data
        for placement in progress.over(placements.placements):
            if isinstance(placement.vertex, AbstractHasProfileData):
                # get data
                profile_data = placement.vertex.get_profile_data(
                    transceiver, placement)
                if profile_data.tags:
                    self._write(placement, profile_data, run_time_ms,
                                machine_time_step_ms, provenance_file_path)
    def __call__(
            self, live_packet_gatherer_parameters, machine, machine_graph,
            application_graph=None, graph_mapper=None):
        """ Add LPG vertices on Ethernet connected chips as required.

        :param live_packet_gatherer_parameters:\
            the Live Packet Gatherer parameters requested by the script
        :param machine: the SpiNNaker machine as discovered
        :param machine_graph: the machine graph
        :param application_graph: the application graph
        :param graph_mapper: the graph mapper between the graphs
        :return: mapping between LPG parameters and LPG vertex
        """
        # pylint: disable=too-many-arguments

        # create progress bar
        progress = ProgressBar(
            machine.ethernet_connected_chips,
            string_describing_what_being_progressed=(
                "Adding Live Packet Gatherers to Graph"))

        # Keep track of the vertices added by parameters and board address
        lpg_params_to_vertices = defaultdict(dict)

        # for every Ethernet connected chip, add the gatherers required
        for chip in progress.over(machine.ethernet_connected_chips):
            for params in live_packet_gatherer_parameters:
                if (params.board_address is None or
                        params.board_address == chip.ip_address):
                    lpg_params_to_vertices[params][chip.x, chip.y] = \
                        self._add_lpg_vertex(application_graph, graph_mapper,
                                             machine_graph, chip, params)

        return lpg_params_to_vertices
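# A small sketch of the mapping returned above: a dict keyed by each LPG
# parameter set, whose value maps an Ethernet chip's (x, y) coordinates to
# the gatherer vertex added on that chip. The keys and values here are
# placeholder strings standing in for real parameter and vertex objects.
from collections import defaultdict

lpg_params_to_vertices = defaultdict(dict)
lpg_params_to_vertices["lpg-params-A"][(0, 0)] = "LPG vertex on chip 0,0"
lpg_params_to_vertices["lpg-params-A"][(8, 4)] = "LPG vertex on chip 8,4"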
    def get_spikes(self, label, buffer_manager, region,
                   placements, graph_mapper, application_vertex,
                   base_key_function, machine_time_step):
        # pylint: disable=too-many-arguments
        results = list()
        missing = []
        ms_per_tick = machine_time_step / 1000.0
        vertices = graph_mapper.get_machine_vertices(application_vertex)
        progress = ProgressBar(vertices,
                               "Getting spikes for {}".format(label))
        for vertex in progress.over(vertices):
            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_by_placement(placement, region)
            if data_missing:
                missing.append(placement)
            self._process_spike_data(
                vertex_slice, raw_spike_data, ms_per_tick,
                base_key_function(vertex), results)

        if missing:
            missing_str = recording_utils.make_missing_string(missing)
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region, missing_str)
        if not results:
            return numpy.empty(shape=(0, 2))
        result = numpy.vstack(results)
        return result[numpy.lexsort((result[:, 1], result[:, 0]))]
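# A minimal sketch of the final sort in get_spikes above: each row is a
# (neuron_id, time_ms) pair and numpy.lexsort treats its last key as the
# primary one, so passing (times, ids) orders rows by neuron ID first and
# then by spike time.
import numpy

result = numpy.array([[2, 1.0], [0, 3.0], [2, 0.5], [0, 1.0]])
ordered = result[numpy.lexsort((result[:, 1], result[:, 0]))]
# ordered rows: (0, 1.0), (0, 3.0), (2, 0.5), (2, 1.0)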
    def __call__(
            self, live_packet_gatherer_parameters, placements,
            live_packet_gatherers_to_vertex_mapping, machine,
            machine_graph, application_graph=None, graph_mapper=None):
        """
        :param live_packet_gatherer_parameters: the set of parameters
        :param placements: the placements object
        :param live_packet_gatherers_to_vertex_mapping:\
            the mapping between LPG parameters and the machine vertices\
            associated with them
        :param machine: the SpiNNaker machine
        :param machine_graph: the machine graph
        :param application_graph: the application graph
        :param graph_mapper: the graph mapper between app and machine graph
        :rtype: None
        """
        # pylint: disable=too-many-arguments
        progress = ProgressBar(
            live_packet_gatherer_parameters,
            string_describing_what_being_progressed=(
                "Adding edges to the machine graph between the vertices to "
                "which live output has been requested and its local Live "
                "Packet Gatherer"))

        for lpg_params in progress.over(live_packet_gatherer_parameters):
            # locate vertices that need connecting to an LPG with these params
            for vertex in live_packet_gatherer_parameters[lpg_params]:
                self._connect_lpg_vertex(
                    application_graph, graph_mapper, machine,
                    placements, machine_graph, vertex,
                    live_packet_gatherers_to_vertex_mapping, lpg_params)
    def __call__(self, placements, file_path):
        """
        :param placements:
        :param file_path:
        """

        progress = ProgressBar(len(placements) + 1,
                               "Converting to JSON core allocations")

        # write basic stuff
        json_obj = OrderedDict()
        json_obj['type'] = "cores"
        vertex_by_id = OrderedDict()

        # process placements
        for placement in progress.over(placements, False):
            self._convert_placement(placement, vertex_by_id, json_obj)

        # dump dict into json file
        with open(file_path, "w") as f:
            json.dump(json_obj, f)
        progress.update()

        # validate the schema
        file_format_schemas.validate(json_obj, "core_allocations.json")

        # complete progress bar
        progress.end()

        # return the file format
        return file_path, vertex_by_id
    def __call__(
            self, transceiver, tags=None, iptags=None, reverse_iptags=None):
        """
        :param tags: the tags object which contains IP and reverse IP tags;
            may be None if these are given as separate lists
        :param iptags: a list of IP tags, given when tags is None
        :param reverse_iptags: a list of reverse IP tags, given when tags is
            None
        :param transceiver: the transceiver object
        """
        # clear all the tags from the Ethernet connection, as nothing should
        # be allowed to use it (no two apps should use the same Ethernet
        # connection at the same time)
        progress = ProgressBar(MAX_TAG_ID, "Clearing tags")
        for tag_id in progress.over(range(MAX_TAG_ID)):
            transceiver.clear_ip_tag(tag_id)

        # Use tags object to supply tag info if it is supplied
        if tags is not None:
            iptags = list(tags.ip_tags)
            reverse_iptags = list(tags.reverse_ip_tags)

        # Load the IP tags and the Reverse IP tags
        progress = ProgressBar(
            len(iptags) + len(reverse_iptags), "Loading Tags")
        self.load_iptags(iptags, transceiver, progress)
        self.load_reverse_iptags(reverse_iptags, transceiver, progress)
        progress.end()
    def __call__(self, report_default_directory, dsg_targets, transceiver):
        """ Creates a report that states where in SDRAM each region is \
            (read from machine)

        :param report_default_directory: the folder where reports are written
        :param dsg_targets: the map between placement and file writer
        :param transceiver: the SpiNNMan instance
        :rtype: None
        """

        directory_name = os.path.join(
            report_default_directory, MEM_MAP_SUBDIR_NAME)
        if not os.path.exists(directory_name):
            os.makedirs(directory_name)

        progress = ProgressBar(dsg_targets, "Writing memory map reports")
        for (x, y, p) in progress.over(dsg_targets):
            file_name = os.path.join(
                directory_name, MEM_MAP_FILENAME.format(x, y, p))
            try:
                with open(file_name, "w") as f:
                    self._describe_mem_map(f, transceiver, x, y, p)
            except IOError:
                logger.exception("Generate_placement_reports: Can't open file"
                                 " {} for writing.", file_name)
    def __call__(self, report_default_directory, machine):
        """ Creates a report that states where in SDRAM each region is.

        :param report_default_directory: the folder where reports are written
        :param machine: python representation of the machine
        :rtype: None
        """

        # create file path
        directory_name = os.path.join(
            report_default_directory, self.AREA_CODE_REPORT_NAME)

        # create the progress bar for end users
        progress_bar = ProgressBar(
            len(machine.ethernet_connected_chips),
            "Writing the board chip report")

        # iterate over ethernet chips and then the chips on that board
        with open(directory_name, "w") as writer:
            for ethernet_connected_chip in \
                    progress_bar.over(machine.ethernet_connected_chips):
                chips = machine.get_chips_on_board(ethernet_connected_chip)
                writer.write(
                    "board with IP address : {} : has chips {}\n".format(
                        ethernet_connected_chip.ip_address, list(chips)))
    def __call__(self, router_tables, target_length=None):
        # build storage
        compressed_pacman_router_tables = MulticastRoutingTables()

        # create progress bar
        progress = ProgressBar(
            router_tables.routing_tables, "Compressing routing Tables")

        # compress each router
        for router_table in progress.over(router_tables.routing_tables):
            # convert to rig format
            entries = self._convert_to_mundy_format(router_table)

            # compress the router entries
            compressed_router_table_entries = \
                rigs_compressor.minimise(entries, target_length)

            # convert back to pacman model
            compressed_pacman_table = self._convert_to_pacman_router_table(
                compressed_router_table_entries, router_table.x,
                router_table.y)

            # add to new compressed routing tables
            compressed_pacman_router_tables.add_routing_table(
                compressed_pacman_table)

        # return
        return compressed_pacman_router_tables
    def __call__(self, placements, file_path):
        """
        :param placements: the memory placements object
        :param file_path: the file path for the placements.json
        :return: file path for the placements.json
        """

        # write basic stuff
        json_obj = dict()
        vertex_by_id = dict()

        progress = ProgressBar(placements.n_placements + 1,
                               "converting to JSON placements")

        # process placements
        for placement in progress.over(placements, False):
            vertex_id = ident(placement.vertex)
            vertex_by_id[vertex_id] = placement.vertex
            json_obj[vertex_id] = [placement.x, placement.y]

        # dump dict into json file
        with open(file_path, "w") as file_to_write:
            json.dump(json_obj, file_to_write)
        progress.update()

        # validate the schema
        file_format_schemas.validate(json_obj, "placements.json")
        progress.end()

        # return the file format
        return file_path, vertex_by_id
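# A hedged sketch of the JSON produced above: one entry per placement,
# mapping a vertex identifier to its [x, y] chip coordinates. The vertex
# identifiers and coordinates are placeholders.
import json

json_obj = {
    "vertex-0": [0, 0],
    "vertex-1": [0, 1],
    "vertex-2": [1, 0],
}
print(json.dumps(json_obj, indent=2))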
    def __call__(self, router_tables):
        tables = MulticastRoutingTables()
        previous_masks = dict()

        progress = ProgressBar(
            len(router_tables.routing_tables) * 2,
            "Compressing Routing Tables")

        # Create all masks without holes
        allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]

        # Check that none of the masks have "holes" e.g. 0xFFFF0FFF has a hole
        for router_table in router_tables.routing_tables:
            for entry in router_table.multicast_routing_entries:
                if entry.mask not in allowed_masks:
                    raise PacmanRoutingException(
                        "Only masks without holes are allowed in tables for"
                        " BasicRouteMerger (disallowed mask={})".format(
                            hex(entry.mask)))

        for router_table in progress.over(router_tables.routing_tables):
            new_table = self._merge_routes(router_table, previous_masks)
            tables.add_routing_table(new_table)
            n_entries = len([
                entry for entry in new_table.multicast_routing_entries
                if not entry.defaultable])
            # print("Reduced from {} to {}".format(
            #     len(router_table.multicast_routing_entries), n_entries))
            if n_entries > 1023:
                raise PacmanRoutingException(
                    "Cannot make table small enough: {} entries".format(
                        n_entries))

        return tables
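# A short worked example of the "masks without holes" rule enforced above:
# an allowed mask is a run of 1s in the high bits followed by 0s, generated
# as 0xFFFFFFFF - (2**i - 1); a mask such as 0xFFFF0FFF has a hole and is
# rejected. _32_BITS is reproduced here so the sketch is self-contained.
_32_BITS = 0xFFFFFFFF
allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]

assert 0xFFFFFFFF in allowed_masks      # full mask, no hole
assert 0xFFFFFF00 in allowed_masks      # 24 leading 1s, no hole
assert 0xFFFF0FFF not in allowed_masks  # hole in the middle, disallowed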
    def __call__(self, machine_graph=None, application_graph=None,
                 graph_mapper=None):
        # Generate an n_keys map for the graph and add constraints
        n_keys_map = DictBasedMachinePartitionNKeysMap()

        if machine_graph is None:
            raise ConfigurationException(
                "A machine graph is required for this mapper. "
                "Please choose and try again")
        if (application_graph is None) != (graph_mapper is None):
            raise ConfigurationException(
                "An application graph and a graph mapper must be supplied "
                "together, or not at all. Please fix and try again")

        if application_graph is not None:
            # generate progress bar
            progress = ProgressBar(
                machine_graph.n_vertices,
                "Getting number of keys required by each edge using "
                "application graph")

            # iterate over each partition in the graph
            for vertex in progress.over(machine_graph.vertices):
                partitions = machine_graph.\
                    get_outgoing_edge_partitions_starting_at_vertex(
                        vertex)
                for partition in partitions:
                    if partition.traffic_type == EdgeTrafficType.MULTICAST:
                        self._process_application_partition(
                            partition, n_keys_map, graph_mapper)

        else:
            # generate progress bar
            progress = ProgressBar(
                machine_graph.n_vertices,
                "Getting number of keys required by each edge using "
                "machine graph")

            for vertex in progress.over(machine_graph.vertices):
                partitions = machine_graph.\
                    get_outgoing_edge_partitions_starting_at_vertex(
                        vertex)
                for partition in partitions:
                    if partition.traffic_type == EdgeTrafficType.MULTICAST:
                        self._process_machine_partition(partition, n_keys_map)

        return n_keys_map
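# A tiny illustration of the both-or-neither check above: the "is None"
# tests differ exactly when one of application_graph and graph_mapper is
# supplied without the other, which is the rejected case.
def supplied_inconsistently(application_graph, graph_mapper):
    return (application_graph is None) != (graph_mapper is None)

assert not supplied_inconsistently(None, None)             # machine graph only
assert not supplied_inconsistently("app_graph", "mapper")  # both supplied
assert supplied_inconsistently("app_graph", None)          # rejected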
    def __call__(
            self, placements, hostname,
            report_default_directory, write_text_specs,
            machine, graph_mapper=None, placement_order=None):
        """
        :param placements: placements of machine graph to cores
        :param hostname: SpiNNaker machine name
        :param report_default_directory: the location where reports are stored
        :param write_text_specs:\
            True if the textual version of the specification is to be written
        :param machine: the python representation of the SpiNNaker machine
        :param graph_mapper:\
            the mapping between application and machine graph
        :param placement_order:\
            the optional order in which placements should be examined
        :return: DSG targets (map of placement tuple and filename)
        """
        # pylint: disable=too-many-arguments, too-many-locals
        # pylint: disable=attribute-defined-outside-init
        self._machine = machine
        self._hostname = hostname
        self._report_dir = report_default_directory
        self._write_text = write_text_specs

        # iterate through vertices and call generate_data_spec for each
        # vertex
        targets = DataSpecificationTargets(machine, self._report_dir)

        if placement_order is None:
            placement_order = placements.placements

        progress = ProgressBar(
            placements.n_placements, "Generating data specifications")
        vertices_to_reset = list()
        for placement in progress.over(placement_order):
            # Try to generate the data spec for the placement
            generated = self.__generate_data_spec_for_vertices(
                placement, placement.vertex, targets)

            if generated and isinstance(
                    placement.vertex, AbstractRewritesDataSpecification):
                vertices_to_reset.append(placement.vertex)

            # If the spec wasn't generated directly, and there is an
            # application vertex, try with that
            if not generated and graph_mapper is not None:
                associated_vertex = graph_mapper.get_application_vertex(
                    placement.vertex)
                generated = self.__generate_data_spec_for_vertices(
                    placement, associated_vertex, targets)
                if generated and isinstance(
                        associated_vertex, AbstractRewritesDataSpecification):
                    vertices_to_reset.append(associated_vertex)

        # Ensure that the vertices know their regions have been reloaded
        for vertex in vertices_to_reset:
            vertex.mark_regions_reloaded()

        return targets
def synapse_expander(
        app_graph, graph_mapper, placements, transceiver,
        provenance_file_path, executable_finder):
    """ Run the synapse expander - needs to be done after data has been loaded
    """

    synapse_expander = executable_finder.get_executable_path(SYNAPSE_EXPANDER)
    delay_expander = executable_finder.get_executable_path(DELAY_EXPANDER)

    progress = ProgressBar(len(app_graph.vertices) + 2, "Expanding Synapses")

    # Find the places where the synapse and delay expanders should run
    expander_cores = ExecutableTargets()
    for vertex in progress.over(app_graph.vertices, finish_at_end=False):

        # Find population vertices
        if isinstance(
                vertex, (AbstractPopulationVertex, DelayExtensionVertex)):

            # Add all machine vertices of the population vertex to ones
            # that need synapse expansion
            for m_vertex in graph_mapper.get_machine_vertices(vertex):
                vertex_slice = graph_mapper.get_slice(m_vertex)
                if vertex.gen_on_machine(vertex_slice):
                    placement = placements.get_placement_of_vertex(m_vertex)
                    if isinstance(vertex, AbstractPopulationVertex):
                        binary = synapse_expander
                    else:
                        binary = delay_expander
                    expander_cores.add_processor(
                        binary, placement.x, placement.y, placement.p)

    # Launch the expanders
    expander_app_id = transceiver.app_id_tracker.get_new_id()
    transceiver.execute_application(expander_cores, expander_app_id)
    progress.update()

    # Wait for everything to finish
    finished = False
    try:
        transceiver.wait_for_cores_to_be_in_state(
            expander_cores.all_core_subsets, expander_app_id,
            [CPUState.FINISHED])
        progress.update()
        finished = True
        _extract_iobuf(expander_cores, transceiver, provenance_file_path)
        progress.end()
    except Exception:
        logger.exception("Synapse expander has failed")
        _handle_failure(
            expander_cores, transceiver, provenance_file_path)
    finally:
        transceiver.stop_application(expander_app_id)
        transceiver.app_id_tracker.free_id(expander_app_id)

        if not finished:
            raise SpynnakerException(
                "The synapse expander failed to complete")
    def allocate_chip_ids(self, machine, graph):
        """ Go through the chips (real and virtual) and allocate keys for each
        """
        progress = ProgressBar(
            graph.n_vertices + machine.n_chips,
            "Allocating virtual identifiers")

        # allocate standard IDs for real chips
        for x, y in progress.over(machine.chip_coordinates, False):
            expected_chip_id = (x << 8) + y
            self._allocate_elements(expected_chip_id, 1)

        # allocate IDs for virtual chips
        for vertex in progress.over(graph.vertices):
            if isinstance(vertex, AbstractVirtualVertex):
                x, y = self._assign_virtual_chip_info(
                    machine, self._get_link_data(machine, vertex))
                vertex.set_virtual_chip_coordinates(x, y)
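# A tiny worked example of the identifier scheme above: each real chip is
# given the ID (x << 8) + y, so chip (1, 2) maps to 0x102 = 258, and IDs of
# distinct chips never collide while y stays below 256.
x, y = 1, 2
expected_chip_id = (x << 8) + y
assert expected_chip_id == 0x102 == 258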
    def __call__(self, fixed_routes, transceiver, app_id):

        progress_bar = ProgressBar(
            total_number_of_things_to_do=len(fixed_routes),
            string_describing_what_being_progressed="loading fixed routes")

        for chip_x, chip_y in progress_bar.over(fixed_routes.keys()):
            transceiver.load_fixed_route(
                chip_x, chip_y, fixed_routes[(chip_x, chip_y)], app_id)
    def __call__(
            self, machine_graph, application_graph=None,
            provenance_data_objects=None):
        """
        :param machine_graph: The machine graph to inspect
        :param application_graph: The optional application graph
        :param provenance_data_objects: Any existing objects to append to
        """

        if provenance_data_objects is not None:
            prov_items = provenance_data_objects
        else:
            prov_items = list()

        progress = ProgressBar(
            machine_graph.n_vertices +
            machine_graph.n_outgoing_edge_partitions,
            "Getting provenance data from machine graph")
        for vertex in progress.over(machine_graph.vertices, False):
            if isinstance(vertex, AbstractProvidesLocalProvenanceData):
                prov_items.extend(vertex.get_local_provenance_data())
        for partition in progress.over(machine_graph.outgoing_edge_partitions):
            for edge in partition.edges:
                if isinstance(edge, AbstractProvidesLocalProvenanceData):
                    prov_items.extend(edge.get_local_provenance_data())

        if application_graph is not None:
            progress = ProgressBar(
                application_graph.n_vertices +
                application_graph.n_outgoing_edge_partitions,
                "Getting provenance data from application graph")
            for vertex in progress.over(application_graph.vertices, False):
                if isinstance(vertex, AbstractProvidesLocalProvenanceData):
                    prov_items.extend(vertex.get_local_provenance_data())
            for partition in progress.over(
                    application_graph.outgoing_edge_partitions):
                for edge in partition.edges:
                    if isinstance(edge, AbstractProvidesLocalProvenanceData):
                        prov_items.extend(edge.get_local_provenance_data())

        return prov_items
    def __call__(self, router_tables, app_id, transceiver, machine):
        progress = ProgressBar(router_tables.routing_tables,
                               "Loading routing data onto the machine")

        # load each router table that is needed for the application to run
        # into the chip's SDRAM
        for table in progress.over(router_tables.routing_tables):
            if (not machine.get_chip_at(table.x, table.y).virtual
                    and table.multicast_routing_entries):
                transceiver.load_multicast_routes(
                    table.x, table.y, table.multicast_routing_entries,
                    app_id=app_id)
    def __call__(self, machine_graph, placements, machine,
                 vertex_to_ethernet_connected_chip_mapping,
                 application_graph=None, graph_mapper=None):
        """
        :param machine_graph: the machine graph instance
        :param placements: the placements
        :param machine: the machine object
        :param vertex_to_ethernet_connected_chip_mapping: \
            mapping between Ethernet-connected chips and packet gatherers
        :param application_graph: the application graph
        :param graph_mapper: the graph mapper
        :rtype: None
        """
        # pylint: disable=too-many-arguments
        n_app_vertices = 0
        if application_graph is not None:
            n_app_vertices = application_graph.n_vertices

        progress = ProgressBar(
            machine_graph.n_vertices + n_app_vertices,
            "Inserting edges between vertices which require FR speed up "
            "functionality.")

        for vertex in progress.over(machine_graph.vertices, False):
            if isinstance(vertex, ExtraMonitorSupportMachineVertex):
                self._process_vertex(
                    vertex, machine, placements, machine_graph,
                    vertex_to_ethernet_connected_chip_mapping,
                    application_graph, graph_mapper)

        if application_graph is not None:
            for vertex in progress.over(application_graph.vertices, False):
                if isinstance(vertex, ExtraMonitorSupport):
                    machine_verts = graph_mapper.get_machine_vertices(vertex)
                    for machine_vertex in machine_verts:
                        self._process_vertex(
                            machine_vertex, machine, placements, machine_graph,
                            vertex_to_ethernet_connected_chip_mapping,
                            application_graph, graph_mapper)
        progress.end()
    def __call__(self, machine_graph, machine, plan_n_timesteps):
        """

        :param machine_graph: The machine_graph to place
        :type machine_graph:\
            :py:class:`pacman.model.graphs.machine.MachineGraph`
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type plan_n_timesteps: int
        :return: A set of placements
        :rtype: :py:class:`pacman.model.placements.Placements`
        :raise pacman.exceptions.PacmanPlaceException: \
            If something goes wrong with the placement
        """
        # check that the algorithm can handle the constraints
        self._check_constraints(machine_graph.vertices)

        # Sort the vertices into those with and those without
        # placement constraints
        placements = Placements()
        constrained = list()
        unconstrained = set()
        for vertex in machine_graph.vertices:
            if locate_constraints_of_type(
                    vertex.constraints, AbstractPlacerConstraint):
                constrained.append(vertex)
            else:
                unconstrained.add(vertex)

        # Iterate over constrained vertices and generate placements
        progress = ProgressBar(
            machine_graph.n_vertices, "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, plan_n_timesteps, self._generate_radial_chips(machine))
        constrained = sort_vertices_by_known_constraints(constrained)
        for vertex in progress.over(constrained, False):
            self._place_vertex(vertex, resource_tracker, machine, placements)

        while unconstrained:
            # Place the subgraph with the overall most connected vertex
            max_connected_vertex = self._find_max_connected_vertex(
                unconstrained, machine_graph)
            self._place_unconstrained_subgraph(
                max_connected_vertex, machine_graph, unconstrained,
                machine, placements, resource_tracker, progress)

        # finished, so stop progress bar and return placements
        progress.end()
        return placements
    def _write_router_provenance_data(
            self, router_tables, machine, txrx, extra_monitor_vertices,
            placements):
        """ Writes the provenance data of the router diagnostics

        :param router_tables: the routing tables generated by PACMAN
        :param machine: the SpiNNMachine object
        :param txrx: the transceiver object
        :param extra_monitor_vertices: list of extra monitor vertices
        :param placements: the placements object
        """
        # pylint: disable=too-many-arguments
        progress = ProgressBar(machine.n_chips*2, "Getting Router Provenance")

        # acquire diagnostic data
        items = list()
        seen_chips = set()

        # get all extra monitor core data if it exists
        reinjection_data = None
        if extra_monitor_vertices is not None:
            reinjection_data = \
                extra_monitor_vertices[0].get_reinjection_status_for_vertices(
                    placements=placements,
                    extra_monitor_cores_for_data=extra_monitor_vertices,
                    transceiver=txrx)

        for router_table in progress.over(sorted(
                router_tables.routing_tables,
                key=lambda table: (table.x, table.y)), False):
            self._write_router_table_diagnostic(
                txrx, machine, router_table.x, router_table.y, seen_chips,
                router_table, items, reinjection_data)

        for chip in progress.over(sorted(
                machine.chips, key=lambda c: (c.x, c.y))):
            self._write_router_chip_diagnostic(
                txrx, chip, seen_chips, items, reinjection_data)
        return items
    def __get_projection_data(
            self, data_to_get, pre_vertex, post_vertex, connection_holder,
            handle_time_out_configuration):
        # pylint: disable=too-many-arguments, too-many-locals
        ctl = self._spinnaker_control

        # if using extra monitor functionality, locate extra data items
        if ctl.get_generated_output("UsingAdvancedMonitorSupport"):
            extra_monitors = ctl.get_generated_output(
                "MemoryExtraMonitorVertices")
            receivers = ctl.get_generated_output(
                "MemoryMCGatherVertexToEthernetConnectedChipMapping")
            extra_monitor_placements = ctl.get_generated_output(
                "MemoryExtraMonitorToChipMapping")
        else:
            extra_monitors = None
            receivers = None
            extra_monitor_placements = None

        edges = ctl.graph_mapper.get_machine_edges(self._projection_edge)
        progress = ProgressBar(
            edges, "Getting {}s for projection between {} and {}".format(
                data_to_get, pre_vertex.label, post_vertex.label))
        for edge in progress.over(edges):
            placement = ctl.placements.get_placement_of_vertex(
                edge.post_vertex)

            # if using the extra monitor data extractor, find local receiver
            if extra_monitors is not None:
                receiver = helpful_functions.locate_extra_monitor_mc_receiver(
                    placement_x=placement.x, placement_y=placement.y,
                    machine=ctl.machine,
                    packet_gather_cores_to_ethernet_connection_map=receivers)
                sender_extra_monitor_core = extra_monitor_placements[
                    placement.x, placement.y]
                sender_monitor_place = ctl.placements.get_placement_of_vertex(
                    sender_extra_monitor_core)
            else:
                receiver = None
                sender_monitor_place = None

            connections = post_vertex.get_connections_from_machine(
                ctl.transceiver, placement, edge, ctl.graph_mapper,
                ctl.routing_infos, self._synapse_information,
                ctl.machine_time_step, extra_monitors is not None,
                ctl.placements, receiver, sender_monitor_place,
                extra_monitors, handle_time_out_configuration,
                ctl.fixed_routes)
            if connections is not None:
                connection_holder.add_connections(connections)
        connection_holder.finish()
    def __call__(self, machine, plan_n_timesteps, placements):
        """ see AbstractTagAllocatorAlgorithm.allocate_tags
        :param machine:\
            The machine with respect to which to partition the application\
            graph
        :type machine: :py:class:`spinn_machine.Machine`
        :param plan_n_timesteps: number of timesteps to plan for
        :type  plan_n_timesteps: int
        :param placements:
        :return:
        """

        resource_tracker = ResourceTracker(machine, plan_n_timesteps)

        # Keep track of ports allocated to reverse IP tags and tags that still
        # need a port to be allocated
        ports_to_allocate = dict()
        tags_to_allocate_ports = list()

        # Gather the placements that have tags to be allocated
        progress = ProgressBar(placements.n_placements, "Discovering tags")
        placements_with_tags = list()
        for placement in progress.over(placements.placements):
            self._gather_placements_with_tags(placement, placements_with_tags)

        # Go through and allocate the IP tags and constrained reverse IP tags
        tags = Tags()
        progress = ProgressBar(placements_with_tags, "Allocating tags")
        for placement in progress.over(placements_with_tags):
            self._allocate_tags_for_placement(
                placement, resource_tracker, tags, ports_to_allocate,
                tags_to_allocate_ports)

        # Finally allocate ports to the unconstrained reverse IP tags
        self._allocate_ports_for_reverse_ip_tags(
            tags_to_allocate_ports, ports_to_allocate, tags)

        return list(tags.ip_tags), list(tags.reverse_ip_tags), tags
    def __call__(
            self, machine, n_machine_time_steps, n_samples_per_recording,
            sampling_frequency, time_scale_factor, machine_time_step,
            pre_allocated_resources=None):
        """
        :param machine: the SpiNNaker machine as discovered
        :param n_machine_time_steps: the number of machine\
            time steps used by the simulation during this phase
        :param n_samples_per_recording: how many samples between record entries
        :param sampling_frequency: the frequency of sampling
        :param time_scale_factor: the time scale factor
        :param machine_time_step: the machine time step
        :param pre_allocated_resources: other preallocated resources
        :return: preallocated resources
        """
        # pylint: disable=too-many-arguments

        progress_bar = ProgressBar(
            machine.n_chips, "Preallocating resources for chip power monitor")

        # get the resources the power monitor needs on each core
        resources = ChipPowerMonitorMachineVertex.get_resources(
            n_machine_time_steps=n_machine_time_steps,
            n_samples_per_recording=n_samples_per_recording,
            sampling_frequency=sampling_frequency,
            time_scale_factor=time_scale_factor,
            time_step=machine_time_step)

        # for every chip, note the SDRAM and the core that the chip power
        # monitor will need
        sdrams = list()
        cores = list()
        for chip in progress_bar.over(machine.chips):
            sdrams.append(
                SpecificChipSDRAMResource(chip, resources.sdram.get_value()))
            cores.append(CoreResource(chip, 1))

        # create preallocated resource container
        cpm_pre_allocated_resource_container = PreAllocatedResourceContainer(
            specific_sdram_usage=sdrams, core_resources=cores)

        # add other preallocated resources
        if pre_allocated_resources is not None:
            cpm_pre_allocated_resource_container.extend(
                pre_allocated_resources)

        # return preallocated resources
        return cpm_pre_allocated_resource_container
    def __call__(self,
                 transceiver,
                 executable_targets,
                 executable_finder,
                 provenance_file_path,
                 from_cores="ALL",
                 binary_types=None):

        error_entries = list()
        warn_entries = list()

        # all the cores
        if from_cores == "ALL":
            progress = ProgressBar(len(executable_targets.binaries),
                                   "Extracting IOBUF from the machine")
            for binary in progress.over(executable_targets.binaries):
                core_subsets = executable_targets.get_cores_for_binary(binary)
                self._run_for_core_subsets(core_subsets, binary, transceiver,
                                           provenance_file_path, error_entries,
                                           warn_entries)

        elif from_cores:
            if binary_types:
                # a mix of both: specific cores and specific binary types
                progress = ProgressBar(len(executable_targets.binaries),
                                       "Extracting IOBUF from the machine")
                binaries = executable_finder.get_executable_paths(binary_types)
                iocores = (
                    helpful_functions.convert_string_into_chip_and_core_subset(
                        from_cores))
                for binary in progress.over(executable_targets.binaries):
                    if binary in binaries:
                        core_subsets = executable_targets.get_cores_for_binary(
                            binary)
                    else:
                        core_subsets = iocores.intersect(
                            executable_targets.get_cores_for_binary(binary))
                    if core_subsets:
                        self._run_for_core_subsets(core_subsets, binary,
                                                   transceiver,
                                                   provenance_file_path,
                                                   error_entries, warn_entries)

            else:
                # some hard coded cores
                progress = ProgressBar(len(executable_targets.binaries),
                                       "Extracting IOBUF from the machine")
                iocores = (
                    helpful_functions.convert_string_into_chip_and_core_subset(
                        from_cores))
                for binary in progress.over(executable_targets.binaries):
                    core_subsets = iocores.intersect(
                        executable_targets.get_cores_for_binary(binary))
                    if core_subsets:
                        self._run_for_core_subsets(core_subsets, binary,
                                                   transceiver,
                                                   provenance_file_path,
                                                   error_entries, warn_entries)
        else:
            if binary_types:
                # some binaries
                binaries = executable_finder.get_executable_paths(binary_types)
                progress = ProgressBar(len(binaries),
                                       "Extracting IOBUF from the machine")
                for binary in progress.over(binaries):
                    core_subsets = executable_targets.get_cores_for_binary(
                        binary)
                    self._run_for_core_subsets(core_subsets, binary,
                                               transceiver,
                                               provenance_file_path,
                                               error_entries, warn_entries)
            else:
                # nothing
                pass

        return error_entries, warn_entries
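# A condensed sketch of the core-selection rules above, using plain sets in
# place of CoreSubsets: "ALL" takes every core of every binary; a specific
# core list intersects with each binary's cores unless that binary was also
# named in binary_types; binary_types alone takes the named binaries' cores
# unfiltered; and with neither given nothing is extracted. This illustrates
# the branching only, not the class's actual code.
def cores_to_read(binary, binary_cores, from_cores, chosen_binaries):
    if from_cores == "ALL":
        return set(binary_cores)
    if from_cores:
        if binary in chosen_binaries:
            return set(binary_cores)
        return set(binary_cores) & set(from_cores)
    if binary in chosen_binaries:
        return set(binary_cores)
    return set()

assert cores_to_read("a.aplx", {1, 2, 3}, "ALL", set()) == {1, 2, 3}
assert cores_to_read("a.aplx", {1, 2, 3}, {2, 9}, set()) == {2}
assert cores_to_read("a.aplx", {1, 2, 3}, None, {"a.aplx"}) == {1, 2, 3}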
    def __call__(self,
                 transceiver,
                 placements,
                 hostname,
                 report_directory,
                 write_text_specs,
                 application_data_file_path,
                 graph_mapper=None):
        """

        :param transceiver: SpiNNMan transceiver for communication
        :param placements: the list of placements of the machine graph to cores
        :param hostname: the machine name
        :param report_directory: the location where reports are stored
        :param write_text_specs:\
            True if the textual version of the specification is to be written
        :param application_data_file_path:\
            Folder where data specifications should be written to
        :param graph_mapper:\
            the mapping between application and machine graph
        """

        # build file paths for the reloaded data regions
        reloaded_dsg_data_files_file_path = \
            helpful_functions.generate_unique_folder_name(
                application_data_file_path, "reloaded_data_regions", "")
        reloaded_dsg_report_files_file_path = \
            helpful_functions.generate_unique_folder_name(
                report_directory, "reloaded_data_regions", "")

        # build new folders
        if not os.path.exists(reloaded_dsg_data_files_file_path):
            os.makedirs(reloaded_dsg_data_files_file_path)
        if not os.path.exists(reloaded_dsg_report_files_file_path):
            os.makedirs(reloaded_dsg_report_files_file_path)

        application_vertices_to_reset = set()

        progress = ProgressBar(placements.n_placements, "Reloading data")
        for placement in progress.over(placements.placements):

            # Try to generate the data spec for the placement
            generated = self._regenerate_data_spec_for_vertices(
                transceiver, placement, placement.vertex, hostname,
                reloaded_dsg_report_files_file_path, write_text_specs,
                reloaded_dsg_data_files_file_path)

            # If the region was regenerated, mark it reloaded
            if generated:
                placement.vertex.mark_regions_reloaded()

            # If the spec wasn't generated directly, and there is an
            # application vertex, try with that
            if not generated and graph_mapper is not None:
                associated_vertex = graph_mapper.get_application_vertex(
                    placement.vertex)
                generated = self._regenerate_data_spec_for_vertices(
                    transceiver, placement, associated_vertex, hostname,
                    reloaded_dsg_report_files_file_path, write_text_specs,
                    reloaded_dsg_data_files_file_path)

                # If the region was regenerated, remember the application
                # vertex for resetting later
                if generated:
                    application_vertices_to_reset.add(associated_vertex)

        # Only reset the application vertices here; otherwise only one
        # machine vertex's data would be updated
        for vertex in application_vertices_to_reset:
            vertex.mark_regions_reloaded()
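# The bit-field summary below treats a packet as redundant when the bit for
# its target neuron is 0. A plausible bit lookup over the unpacked 32-bit
# words, written here as a free function, might look like this; it is an
# assumption for illustration, not the class's actual _bit_for_neuron_id.
def bit_for_neuron_id(bit_field, neuron_id, bits_in_a_word=32):
    """ Return the single bit for neuron_id from a sequence of words. """
    word = bit_field[neuron_id // bits_in_a_word]
    return (word >> (neuron_id % bits_in_a_word)) & 1

assert bit_for_neuron_id((0b0101,), 0) == 1
assert bit_for_neuron_id((0b0101,), 1) == 0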
    def _read_back_and_summarise_bit_fields(self, app_graph, transceiver,
                                            placements, default_report_folder,
                                            bit_field_summary_report_name):
        """ summary report of the bitfields that were generated

        :param app_graph: app graph
        :param transceiver: the SPiNNMan instance
        :param placements: The placements
        :param default_report_folder:the file path for where reports are.
        :param bit_field_summary_report_name: the name of the summary file
        :rtype: None
        """
        progress = ProgressBar(
            len(app_graph.vertices),
            "reading back bitfields from chip for summary report")

        chip_packet_count = defaultdict(int)
        chip_redundant_count = defaultdict(int)

        file_path = os.path.join(default_report_folder,
                                 bit_field_summary_report_name)
        with open(file_path, "w") as output:
            # read back bit fields for each app vertex that may have them
            for app_vertex in progress.over(app_graph.vertices):
                local_total = 0
                local_redundant = 0

                # get machine verts
                for vertex in app_vertex.machine_vertices:
                    if isinstance(vertex, AbstractSupportsBitFieldGeneration):
                        placement = placements.get_placement_of_vertex(vertex)

                        # get bitfield address
                        bit_field_address = vertex.bit_field_base_address(
                            transceiver, placement)

                        # read how many bitfields there are
                        n_bit_field_entries, = struct.unpack(
                            "<I",
                            transceiver.read_memory(placement.x, placement.y,
                                                    bit_field_address,
                                                    BYTES_PER_WORD))
                        reading_address = bit_field_address + BYTES_PER_WORD

                        # read in each bitfield
                        for _bit_field_index in range(0, n_bit_field_entries):
                            # master pop key, n words and read pointer
                            _master_pop_key, n_words_to_read, read_pointer = \
                                struct.unpack("<III", transceiver.read_memory(
                                    placement.x, placement.y,
                                    reading_address,
                                    self._BYTES_PER_FILTER))
                            reading_address += self._BYTES_PER_FILTER

                            # get bitfield words
                            bit_field = struct.unpack(
                                "<{}I".format(n_words_to_read),
                                transceiver.read_memory(
                                    placement.x, placement.y, read_pointer,
                                    n_words_to_read * BYTES_PER_WORD))

                            n_neurons = n_words_to_read * self._BITS_IN_A_WORD
                            for neuron_id in range(0, n_neurons):
                                if (self._bit_for_neuron_id(
                                        bit_field, neuron_id) == 0):
                                    chip_redundant_count[placement.x,
                                                         placement.y] += 1
                                    local_redundant += 1
                                chip_packet_count[placement.x,
                                                  placement.y] += 1
                                local_total += 1

                        redundant_packet_percentage = 0
                        if local_total != 0:
                            redundant_packet_percentage = (
                                (100.0 / float(local_total)) *
                                float(local_redundant))

                        output.write(
                            "vertex on {}:{}:{} has total incoming packet "
                            "count of {} and a redundant packet count of {}. "
                            "Making a redundant packet percentage of "
                            "{}\n".format(placement.x, placement.y,
                                          placement.p, local_total,
                                          local_redundant,
                                          redundant_packet_percentage))
                        output.flush()

            output.write("\n\n\n")

            # overall summary
            total_packets = 0
            total_redundant_packets = 0
            for (x, y) in chip_packet_count:
                output.write(
                    "chip {}:{} has a total incoming packet count of {} and "
                    "a redundant packet count of {} given a redundant "
                    "percentage of {} \n".format(
                        x, y, chip_packet_count[(x, y)],
                        chip_redundant_count[(x, y)],
                        ((100.0 / float(chip_packet_count[(x, y)])) *
                         float(chip_redundant_count[(x, y)]))))

                total_packets += chip_packet_count[(x, y)]
                total_redundant_packets += chip_redundant_count[(x, y)]

            percentage = 0.0
            if total_packets:
                percentage = (100.0 * total_redundant_packets) / total_packets

            output.write(
                "overall the application has estimated {} packets flying "
                "around of which {} are redundant at reception. this is "
                "{}% of the packets".format(total_packets,
                                            total_redundant_packets,
                                            percentage))
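
The summary above relies on a private _bit_for_neuron_id helper to test whether a neuron's bit is set in the packed bitfield words. Below is a minimal standalone sketch of that lookup, assuming 32-bit words with the least significant bit holding the lowest neuron index; the real helper and its bit ordering may differ.

BITS_IN_A_WORD = 32  # assumed word width, mirroring self._BITS_IN_A_WORD


def bit_for_neuron_id(bit_field, neuron_id):
    """ Return 1 if the neuron's bit is set in the packed words, else 0. """
    word_index = neuron_id // BITS_IN_A_WORD
    bit_index = neuron_id % BITS_IN_A_WORD
    return (bit_field[word_index] >> bit_index) & 1


# two words covering 64 neurons, with only neuron 33's bit set
assert bit_for_neuron_id([0x0, 0x2], 33) == 1
assert bit_for_neuron_id([0x0, 0x2], 0) == 0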
Example No. 34
    def get_spikes(
            self, label, buffer_manager, region, placements, graph_mapper,
            application_vertex, variable, machine_time_step):
        if variable not in self.__bitfield_variables:
            msg = "Variable {} is not supported, use get_matrix_data".format(
                variable)
            raise ConfigurationException(msg)

        spike_times = list()
        spike_ids = list()

        vertices = graph_mapper.get_machine_vertices(application_vertex)
        missing_str = ""
        progress = ProgressBar(vertices, "Getting spikes for {}".format(label))
        for vertex in progress.over(vertices):
            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            ms_per_tick = machine_time_step / MICRO_TO_MILLISECOND_CONVERSION
            neurons = self._neurons_recording(variable, vertex_slice)
            neurons_recording = len(neurons)
            if neurons_recording == 0:
                continue

            # Read the spikes
            n_words = int(math.ceil(neurons_recording / BITS_PER_WORD))
            n_bytes = n_words * BYTES_PER_WORD
            n_words_with_timestamp = n_words + 1

            # for buffering output, info is taken from the buffer manager
            record_raw, data_missing = buffer_manager.get_data_by_placement(
                    placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(
                    placement.x, placement.y, placement.p)
            if len(record_raw) > 0:
                raw_data = (
                    numpy.asarray(record_raw, dtype="uint8").view(
                        dtype="<i4")).reshape([-1, n_words_with_timestamp])
            else:
                raw_data = record_raw
            if len(raw_data) > 0:
                record_time = raw_data[:, 0] * float(ms_per_tick)
                spikes = raw_data[:, 1:].byteswap().view("uint8")
                bits = numpy.fliplr(numpy.unpackbits(spikes).reshape(
                    (-1, 32))).reshape((-1, n_bytes * 8))
                time_indices, local_indices = numpy.where(bits == 1)
                if self.__indexes[variable] is None:
                    indices = local_indices + vertex_slice.lo_atom
                    times = record_time[time_indices].reshape((-1))
                    spike_ids.extend(indices)
                    spike_times.extend(times)
                else:
                    for time_indice, local in zip(time_indices, local_indices):
                        if local < neurons_recording:
                            spike_ids.append(neurons[local])
                            spike_times.append(record_time[time_indice])

        if len(missing_str) > 0:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region, missing_str)

        if len(spike_ids) == 0:
            return numpy.zeros((0, 2), dtype="float")

        result = numpy.column_stack((spike_ids, spike_times))
        return result[numpy.lexsort((spike_times, spike_ids))]
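
The byteswap/unpackbits/fliplr sequence above is dense, so the standalone sketch below walks the same decode on an invented single-row recording (a timestamp word followed by one word of spike bits) to show how the neuron indices are recovered; the 32-bit word layout is assumed to match the recording format used here.

import numpy

# one recorded row: timestamp 5, then one word with bits set for neurons 1
# and 2 (binary 0b110); all values are invented for illustration
raw_data = numpy.array([[5, 0b110]], dtype="<i4")
spikes = raw_data[:, 1:].byteswap().view("uint8")
bits = numpy.fliplr(numpy.unpackbits(spikes).reshape((-1, 32))).reshape(
    (-1, 32))
time_indices, local_indices = numpy.where(bits == 1)
print(local_indices)   # -> [1 2], the neuron indices within the slice
print(time_indices)    # -> [0 0], both spikes fall in the first recorded row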
Example No. 35
def validate_routes(machine_graph, placements, routing_infos,
                    routing_tables, machine, graph_mapper=None):
    """ Go though the placements given and check that the routing entries\
        within the routing tables support reach the correction destinations\
        as well as not producing any cycles.

    :param machine_graph: the graph
    :param placements: the placements container
    :param routing_infos: the routing info container
    :param routing_tables: \
        the routing tables generated by the routing algorithm
    :param graph_mapper: \
        the mapping between graphs or none if only using a machine graph
    :param machine: the python machine object
    :type machine: spinn_machine.Machine object
    :rtype: None
    :raises PacmanRoutingException: when either no routing table entry is\
        found by the search on a given router, or a cycle is detected
    """
    traffic_multicast = (
        lambda edge: edge.traffic_type == EdgeTrafficType.MULTICAST)
    progress = ProgressBar(
        placements.placements,
        "Verifying the routes from each core travel to the correct locations")
    for placement in progress.over(placements.placements):

        # locate all placements with which this placement/vertex will
        # communicate for a given key_and_mask, and search its
        # determined destinations

        # gather keys and masks per partition
        partitions = machine_graph.\
            get_outgoing_edge_partitions_starting_at_vertex(placement.vertex)

        if graph_mapper is not None:
            n_atoms = graph_mapper.get_slice(placement.vertex).n_atoms
        else:
            n_atoms = 0

        for partition in partitions:
            r_info = routing_infos.get_routing_info_from_partition(
                partition)
            is_continuous = _check_if_partition_has_continuous_keys(partition)
            if not is_continuous:
                logger.warning(
                    "Due to the none continuous nature of the keys in this "
                    "partition {}, we cannot check all atoms will be routed "
                    "correctly, but will check the base key instead",
                    partition)

            destination_placements = OrderedSet()

            # filter for just multicast edges; we don't check other types of
            # edges here.
            out_going_edges = filter(traffic_multicast, partition.edges)

            # for every outgoing edge, locate its destination and store it.
            for outgoing_edge in out_going_edges:
                dest_placement = placements.get_placement_of_vertex(
                    outgoing_edge.post_vertex)
                destination_placements.append(
                    PlacementTuple(x=dest_placement.x,
                                   y=dest_placement.y,
                                   p=dest_placement.p))

            # search for these destinations
            for key_and_mask in r_info.keys_and_masks:
                _search_route(
                    placement, destination_placements, key_and_mask,
                    routing_tables, machine, n_atoms, is_continuous)
Example No. 36
    def __call__(self,
                 router_tables,
                 machine,
                 placements,
                 transceiver,
                 default_report_folder,
                 produce_report,
                 use_timer_cut_off,
                 machine_graph,
                 routing_infos,
                 machine_time_step,
                 time_scale_factor,
                 target_length=None,
                 time_to_try_for_each_iteration=None):
        """
        :param ~.MulticastRoutingTables router_tables:
        :param ~.Machine machine:
        :param ~.Placements placements:
        :param ~.Transceiver transceiver:
        :param str default_report_folder:
        :param bool produce_report:
        :param bool use_timer_cut_off:
        :param ~.MachineGraph machine_graph:
        :param ~.RoutingInfo routing_infos:
        :param int machine_time_step:
        :param int time_scale_factor:
        :param int target_length:
        :param int time_to_try_for_each_iteration:
        :rtype: ~.MulticastRoutingTables
        """

        if target_length is None:
            target_length = self._MAX_SUPPORTED_LENGTH

        if time_to_try_for_each_iteration is None:
            time_to_try_for_each_iteration = self._DEFAULT_TIME_PER_ITERATION

        # create progress bar
        progress = ProgressBar(
            len(router_tables.routing_tables) * 2,
            "Compressing routing Tables with bitfields in host")

        # create report
        report_folder_path = None
        if produce_report:
            report_folder_path = self.generate_report_path(
                default_report_folder)

        # compressed router table
        compressed_pacman_router_tables = MulticastRoutingTables()

        key_atom_map = self.generate_key_to_atom_map(machine_graph,
                                                     routing_infos)

        # holder for the bitfield SDRAM base addresses
        bit_field_sdram_base_addresses = defaultdict(dict)
        for router_table in progress.over(router_tables.routing_tables, False):
            self.collect_bit_field_sdram_base_addresses(
                router_table.x, router_table.y, machine, placements,
                transceiver, bit_field_sdram_base_addresses)

        # start the routing table choice conversion
        for router_table in progress.over(router_tables.routing_tables):
            self.start_compression_selection_process(
                router_table, produce_report, report_folder_path,
                bit_field_sdram_base_addresses, transceiver, machine_graph,
                placements, machine, target_length,
                time_to_try_for_each_iteration, use_timer_cut_off,
                compressed_pacman_router_tables, key_atom_map)
        # return compressed tables
        return compressed_pacman_router_tables
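
The progress bar above is sized at twice the number of routing tables because the same list is walked twice: once to collect bitfield SDRAM addresses and once to compress. A small sketch of that two-pass pattern, assuming the spinn_utilities import path used by these tools:

from spinn_utilities.progress_bar import ProgressBar

items = ["table_0_0", "table_1_0"]          # stand-ins for routing tables
progress = ProgressBar(len(items) * 2, "two-pass example")
for item in progress.over(items, False):    # first pass; the bar stays open
    pass                                    # e.g. collect SDRAM addresses
for item in progress.over(items):           # second pass; the bar finishes
    pass                                    # e.g. compress each table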
Example No. 37
    def get_spikes(self, label, buffer_manager, region, placements,
                   application_vertex, variable, machine_time_step):
        """ Read a uint32 mapped to time and neuron IDs from the SpiNNaker\
            machine.

        :param str label: vertex label
        :param buffer_manager: the manager for buffered data
        :type buffer_manager:
            ~spinn_front_end_common.interface.buffer_management.BufferManager
        :param int region: the DSG region ID used for this data
        :param ~pacman.model.placements.Placements placements:
            the placements object
        :param application_vertex:
        :type application_vertex:
            ~pacman.model.graphs.application.ApplicationVertex
        :param str variable:
        :param int machine_time_step: microseconds
        :return:
        :rtype: ~numpy.ndarray(tuple(int,int))
        """
        if variable not in self.__bitfield_variables:
            msg = "Variable {} is not supported, use get_matrix_data".format(
                variable)
            raise ConfigurationException(msg)

        spike_times = list()
        spike_ids = list()

        vertices = application_vertex.machine_vertices
        missing_str = ""
        progress = ProgressBar(vertices, "Getting spikes for {}".format(label))
        for vertex in progress.over(vertices):
            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = vertex.vertex_slice

            ms_per_tick = machine_time_step / MICRO_TO_MILLISECOND_CONVERSION
            neurons = self._neurons_recording(variable, vertex_slice)
            neurons_recording = len(neurons)
            if neurons_recording == 0:
                continue

            # Read the spikes
            n_words = int(math.ceil(neurons_recording / BITS_PER_WORD))
            n_bytes = n_words * BYTES_PER_WORD
            n_words_with_timestamp = n_words + 1

            # for buffering output, info is taken from the buffer manager
            record_raw, data_missing = buffer_manager.get_data_by_placement(
                placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(placement.x,
                                                       placement.y,
                                                       placement.p)
            if len(record_raw) > 0:
                raw_data = (numpy.asarray(record_raw, dtype="uint8").view(
                    dtype="<i4")).reshape([-1, n_words_with_timestamp])
            else:
                raw_data = record_raw
            if len(raw_data) > 0:
                record_time = raw_data[:, 0] * float(ms_per_tick)
                spikes = raw_data[:, 1:].byteswap().view("uint8")
                bits = numpy.fliplr(
                    numpy.unpackbits(spikes).reshape((-1, 32))).reshape(
                        (-1, n_bytes * 8))
                time_indices, local_indices = numpy.where(bits == 1)
                if self.__indexes[variable] is None:
                    indices = local_indices + vertex_slice.lo_atom
                    times = record_time[time_indices].reshape((-1))
                    spike_ids.extend(indices)
                    spike_times.extend(times)
                else:
                    for time_indice, local in zip(time_indices, local_indices):
                        if local < neurons_recording:
                            spike_ids.append(neurons[local])
                            spike_times.append(record_time[time_indice])

        if len(missing_str) > 0:
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region, missing_str)

        if len(spike_ids) == 0:
            return numpy.zeros((0, 2), dtype="float")

        result = numpy.column_stack((spike_ids, spike_times))
        return result[numpy.lexsort((spike_times, spike_ids))]
Example No. 38
    def __call__(self, transceiver, placements, has_ran, provenance_file_path,
                 run_time_ms, machine_time_step):
        """
        :param transceiver: the SpiNNMan interface object
        :param placements: The placements of the vertices
        :param has_ran: token that states that the simulation has run
        :param provenance_file_path: The location to store the profile data
        :param run_time_ms: runtime in ms
        :param machine_time_step: machine time step in microseconds
        """

        machine_time_step_ms = machine_time_step / 1000

        if not has_ran:
            raise exceptions.ConfigurationException(
                "This function has been called before the simulation has ran."
                " This is deemed an error, please rectify and try again")

        progress = ProgressBar(placements.n_placements, "Getting profile data")

        # retrieve profile data from any cores that provide it
        for placement in progress.over(placements.placements):
            if isinstance(placement.vertex, AbstractHasProfileData):

                # get data
                profile_data = placement.vertex.get_profile_data(
                    transceiver, placement)

                if len(profile_data.tags) > 0:

                    max_tag_len = max([len(tag) for tag in profile_data.tags])

                    # write data
                    file_name = os.path.join(
                        provenance_file_path,
                        "{}_{}_{}_profile.txt".format(placement.x, placement.y,
                                                      placement.p))

                    # set mode of the file based on whether it already exists
                    mode = "w"
                    if os.path.exists(file_name):
                        mode = "a"

                    # write profile data to file
                    with open(file_name, mode) as writer:
                        writer.write(
                            "{: <{}s} {: <7s} {: <14s} {: <14s} {: <14s}\n".
                            format("tag", max_tag_len, "n_calls", "mean_ms",
                                   "n_calls_per_ts", "mean_ms_per_ts"))
                        writer.write(
                            "{:-<{}s} {:-<7s} {:-<14s} {:-<14s} {:-<14s}\n".
                            format("", max_tag_len, "", "", "", ""))
                        for tag in profile_data.tags:
                            writer.write(
                                "{: <{}s} {: >7d} {: >14.6f} {: >14.6f} "
                                "{: >14.6f}\n".format(
                                    tag, max_tag_len,
                                    profile_data.get_n_calls(tag),
                                    profile_data.get_mean_ms(tag),
                                    profile_data.get_mean_n_calls_per_ts(
                                        tag, run_time_ms,
                                        machine_time_step_ms),
                                    profile_data.get_mean_ms_per_ts(
                                        tag, run_time_ms,
                                        machine_time_step_ms)))
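
The profile table above uses nested-width format specifiers, which can be opaque at first glance. The sketch below shows the same trick on invented tags: "{: <{}s}" left-pads a string to a width computed at runtime, and "{:-<{}s}" reuses that width with '-' as the fill character to draw the separator row.

tags = ["timer", "dma_read"]                       # invented example tags
max_tag_len = max(len(tag) for tag in tags)
print("{: <{}s} {: <7s}".format("tag", max_tag_len, "n_calls"))
print("{:-<{}s} {:-<7s}".format("", max_tag_len, ""))
for tag in tags:
    print("{: <{}s} {: >7d}".format(tag, max_tag_len, 42))   # 42 is made up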
Example No. 39
    def get_spikes(self, label, buffer_manager, region, placements,
                   application_vertex, base_key_function, machine_time_step):
        """ Get the recorded spikes from the object

        :param str label:
        :param buffer_manager: the buffer manager object
        :type buffer_manager:
            ~spinn_front_end_common.interface.buffer_management.BufferManager
        :param int region:
        :param ~pacman.model.placements.Placements placements:
            the placements object
        :param application_vertex:
        :type application_vertex:
            ~pacman.model.graphs.application.ApplicationVertex
        :param int machine_time_step:
            the time step of the simulation, in microseconds
        :param base_key_function:
        :type base_key_function:
            callable(~pacman.model.graphs.machine.MachineVertex,int)
        :return: A numpy array of 2-element arrays of (neuron_id, time)
            ordered by time, one element per event
        :rtype: ~numpy.ndarray(tuple(int,int))
        """
        # pylint: disable=too-many-arguments
        results = list()
        missing = []
        ms_per_tick = machine_time_step / MICRO_TO_MILLISECOND_CONVERSION
        vertices = application_vertex.machine_vertices
        progress = ProgressBar(vertices, "Getting spikes for {}".format(label))
        for vertex in progress.over(vertices):
            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = vertex.vertex_slice

            # Read the spikes
            n_buffer_times = 0
            if vertex.send_buffer_times is not None:
                for i in vertex.send_buffer_times:
                    if hasattr(i, "__len__"):
                        n_buffer_times += len(i)
                    else:
                        # assuming this must be a single integer
                        n_buffer_times += 1

            if n_buffer_times > 0:
                raw_spike_data, data_missing = \
                    buffer_manager.get_data_by_placement(placement, region)
                if data_missing:
                    missing.append(placement)
                self._process_spike_data(vertex_slice,
                                         raw_spike_data, ms_per_tick,
                                         base_key_function(vertex), results)

        if missing:
            missing_str = recording_utils.make_missing_string(missing)
            logger.warning(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}", label, region, missing_str)
        if not results:
            return numpy.empty(shape=(0, 2))
        result = numpy.vstack(results)
        return result[numpy.lexsort((result[:, 1], result[:, 0]))]
    def __call__(self,
                 placements,
                 hostname,
                 report_default_directory,
                 write_text_specs,
                 app_data_runtime_folder,
                 machine,
                 graph_mapper=None,
                 placement_order=None):
        """
        :param placements: placements of machine graph to cores
        :param hostname: SpiNNaker machine name
        :param report_default_directory: the location where reports are stored
        :param write_text_specs:\
            True if the textual version of the specification is to be written
        :param app_data_runtime_folder:\
            Folder where data specifications should be written to
        :param machine: the python representation of the SpiNNaker machine
        :param graph_mapper:\
            the mapping between application and machine graph
        :param placement_order:\
            the optional order in which placements should be examined

        :return: DSG targets (map of placement tuple and filename)
        """
        # pylint: disable=too-many-arguments

        # iterate through vertices and call generate_data_spec for each
        # vertex
        dsg_targets = dict()

        if placement_order is None:
            placement_order = placements.placements

        progress = ProgressBar(placements.n_placements,
                               "Generating data specifications")
        vertices_to_reset = list()
        for placement in progress.over(placement_order):
            # Try to generate the data spec for the placement
            generated = self._generate_data_spec_for_vertices(
                placement, placement.vertex, dsg_targets, hostname,
                report_default_directory, write_text_specs,
                app_data_runtime_folder, machine)

            if generated and isinstance(placement.vertex,
                                        AbstractRewritesDataSpecification):
                vertices_to_reset.append(placement.vertex)

            # If the spec wasn't generated directly, and there is an
            # application vertex, try with that
            if not generated and graph_mapper is not None:
                associated_vertex = graph_mapper.get_application_vertex(
                    placement.vertex)
                generated = self._generate_data_spec_for_vertices(
                    placement, associated_vertex, dsg_targets, hostname,
                    report_default_directory, write_text_specs,
                    app_data_runtime_folder, machine)
                if generated and isinstance(associated_vertex,
                                            AbstractRewritesDataSpecification):
                    vertices_to_reset.append(associated_vertex)

        # Ensure that the vertices know their regions have been reloaded
        for vertex in vertices_to_reset:
            vertex.mark_regions_reloaded()

        return dsg_targets
Example No. 41
def synapse_expander(app_graph, graph_mapper, placements, transceiver,
                     provenance_file_path, executable_finder):
    """ Run the synapse expander - needs to be done after data has been loaded
    """

    synapse_bin = executable_finder.get_executable_path(SYNAPSE_EXPANDER)
    delay_bin = executable_finder.get_executable_path(DELAY_EXPANDER)
    expandable = (AbstractPopulationVertex, DelayExtensionVertex)

    progress = ProgressBar(len(app_graph.vertices) + 2, "Expanding Synapses")

    # Find the places where the synapse and delay expanders should run
    expander_cores = ExecutableTargets()
    gen_on_machine_vertices = list()
    for vertex in progress.over(app_graph.vertices, finish_at_end=False):

        # Find population vertices
        if isinstance(vertex, expandable):
            # Add all machine vertices of the population vertex to ones
            # that need synapse expansion
            gen_on_machine = False
            for m_vertex in graph_mapper.get_machine_vertices(vertex):
                vertex_slice = graph_mapper.get_slice(m_vertex)
                if vertex.gen_on_machine(vertex_slice):
                    placement = placements.get_placement_of_vertex(m_vertex)
                    if isinstance(vertex, AbstractPopulationVertex):
                        binary = synapse_bin
                        gen_on_machine = True
                    else:
                        binary = delay_bin
                    expander_cores.add_processor(binary, placement.x,
                                                 placement.y, placement.p)
            if gen_on_machine:
                gen_on_machine_vertices.append(vertex)

    # Launch the expanders
    expander_app_id = transceiver.app_id_tracker.get_new_id()
    transceiver.execute_application(expander_cores, expander_app_id)
    progress.update()

    # Wait for everything to finish
    finished = False
    try:
        transceiver.wait_for_cores_to_be_in_state(
            expander_cores.all_core_subsets, expander_app_id,
            [CPUState.FINISHED])
        progress.update()
        finished = True
        _fill_in_connection_data(gen_on_machine_vertices, graph_mapper,
                                 placements, transceiver)
        _extract_iobuf(expander_cores, transceiver, provenance_file_path)
        progress.end()
    except Exception:  # pylint: disable=broad-except
        logger.exception("Synapse expander has failed")
        _handle_failure(expander_cores, transceiver, provenance_file_path)
    finally:
        transceiver.stop_application(expander_app_id)
        transceiver.app_id_tracker.free_id(expander_app_id)

        if not finished:
            raise SpynnakerException("The synapse expander failed to complete")
Example No. 42
    def get_matrix_data(self, label, buffer_manager, region, placements,
                        graph_mapper, application_vertex, variable,
                        n_machine_time_steps):
        """ method for reading a uint32 mapped to time and neuron ids from\
            the SpiNNaker machine

        :param label: vertex label
        :param buffer_manager: the manager for buffered data
        :param region: the dsg region id used for this data
        :param placements: the placements object
        :param graph_mapper: \
            the mapping between application and machine vertices
        :param application_vertex:
        :param variable: PyNN name for the variable (v, gsyn_inh, etc.)
        :type variable: str
        :param n_machine_time_steps:
        :return:
        """
        if variable == SPIKES:
            msg = "Variable {} is not supported use get_spikes".format(SPIKES)
            raise ConfigurationException(msg)
        vertices = graph_mapper.get_machine_vertices(application_vertex)
        progress = ProgressBar(vertices,
                               "Getting {} for {}".format(variable, label))
        sampling_rate = self._sampling_rates[variable]
        expected_rows = int(math.ceil(n_machine_time_steps / sampling_rate))
        missing_str = ""
        data = None
        indexes = []
        for vertex in progress.over(vertices):
            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)
            neurons = self._neurons_recording(variable, vertex_slice)
            n_neurons = len(neurons)
            if n_neurons == 0:
                continue
            indexes.extend(neurons)
            # for buffering output, info is taken from the buffer manager
            neuron_param_region_data_pointer, missing_data = \
                buffer_manager.get_data_for_vertex(
                    placement, region)
            record_raw = neuron_param_region_data_pointer.read_all()
            record_length = len(record_raw)

            row_length = self.N_BYTES_FOR_TIMESTAMP + \
                n_neurons * self.N_BYTES_PER_VALUE

            # There is one column for time and one for each neuron recording
            n_rows = record_length // row_length
            # Convert bytes to ints and make a matrix
            record = (numpy.asarray(record_raw,
                                    dtype="uint8").view(dtype="<i4")).reshape(
                                        (n_rows, (n_neurons + 1)))
            # Check if you have the expected data
            if not missing_data and n_rows == expected_rows:
                # Just cut the timestamps off to get the fragment
                fragment = (record[:, 1:] / float(DataType.S1615.scale))
            else:
                missing_str += "({}, {}, {}); ".format(placement.x,
                                                       placement.y,
                                                       placement.p)
                # Start the fragment for this slice empty
                fragment = numpy.empty((expected_rows, n_neurons))
                for i in xrange(0, expected_rows):
                    time = i * sampling_rate
                    # Check if there is data for this timestep
                    local_indexes = numpy.where(record[:, 0] == time)
                    if len(local_indexes[0]) > 0:
                        # Set row to data for that timestep
                        fragment[i] = (record[local_indexes[0], 1:] /
                                       float(DataType.S1615.scale))
                    else:
                        # Set row to nan
                        fragment[i] = numpy.full(n_neurons, numpy.nan)
            if data is None:
                data = fragment
            else:
                # Add the slice fragment on axis 1, which is ids/channel_index
                data = numpy.append(data, fragment, axis=1)
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing recorded data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        sampling_interval = self.get_neuron_sampling_interval(variable)
        return (data, indexes, sampling_interval)
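
The missing-data branch above rebuilds a full matrix by matching recorded timestamps to the expected sample times and padding absent rows with NaN. A self-contained sketch of that padding, using an invented two-neuron recording sampled every 2 timesteps with the middle sample lost (the S1615 scaling step is omitted here):

import numpy

sampling_rate = 2
expected_rows = 3
n_neurons = 2
record = numpy.array([[0, 10, 20],
                      [4, 30, 40]])                 # the t=2 row was lost
fragment = numpy.empty((expected_rows, n_neurons))
for i in range(expected_rows):
    time = i * sampling_rate
    local_indexes = numpy.where(record[:, 0] == time)
    if len(local_indexes[0]) > 0:
        fragment[i] = record[local_indexes[0], 1:]  # copy the recorded row
    else:
        fragment[i] = numpy.full(n_neurons, numpy.nan)
print(fragment)   # rows for t=0 and t=4 kept, the t=2 row is all NaN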
    def __call__(self,
                 graph,
                 placements,
                 app_id,
                 app_data_runtime_folder,
                 hostname,
                 transceiver=None,
                 graph_mapper=None,
                 uses_advanced_monitors=False,
                 extra_monitor_cores_to_ethernet_connection_map=None,
                 processor_to_app_data_base_address=None,
                 machine=None):
        """
        :param graph: The graph to process
        :param placements: The placements of vertices of the graph
        :param app_id: The ID of the application
        :param app_data_runtime_folder: The location of data files
        :param hostname: The host name of the machine
        :param transceiver:\
            The transceiver to write data using; if None only data files\
            are written
        :param graph_mapper: The optional mapping between graphs
        :param processor_to_app_data_base_address:\
            Optional existing dictionary of processor to base address
        :return: The mapping between processor and addresses allocated
        :rtype: dict(tuple(int,int,int),DataWritten)
        """
        # pylint: disable=too-many-arguments
        if processor_to_app_data_base_address is None:
            processor_to_app_data_base_address = dict()
        progress = ProgressBar(sum(1 for _ in placements.placements),
                               "Writing data")
        self._machine = machine
        self._txrx = transceiver
        self._use_monitors = uses_advanced_monitors
        self._monitor_map = extra_monitor_cores_to_ethernet_connection_map
        self._data_folder = app_data_runtime_folder

        if isinstance(graph, ApplicationGraph):
            for placement in progress.over(placements.placements):
                app_vertex = graph_mapper.get_application_vertex(
                    placement.vertex)
                if not isinstance(app_vertex, AbstractUsesMemoryIO):
                    continue
                # select the mode of writing and therefore buffer size
                write_memory_function, _buf_size = self.__get_write_function(
                    placement.x, placement.y)
                self._write_data_for_vertex(
                    placement, app_vertex, app_id, hostname,
                    processor_to_app_data_base_address, write_memory_function)
        elif isinstance(graph, MachineGraph):
            for placement in progress.over(placements.placements):
                if not isinstance(placement.vertex, AbstractUsesMemoryIO):
                    continue
                # select the mode of writing and therefore buffer size
                write_memory_function, _buf_size = self.__get_write_function(
                    placement.x, placement.y)
                self._write_data_for_vertex(
                    placement, placement.vertex, app_id, hostname,
                    processor_to_app_data_base_address, write_memory_function)

        return processor_to_app_data_base_address
    def spinnaker_based_data_specification_execution(self,
                                                     write_memory_map_report,
                                                     dsg_targets, transceiver,
                                                     app_id):
        """

        :param write_memory_map_report:
        :param dsg_targets:
        :param transceiver:
        :param app_id:
        :return: True
        :rtype: bool
        """

        # create a progress bar for end users
        progress = ProgressBar(dsg_targets, "Loading data specifications")

        dse_app_id = transceiver.app_id_tracker.get_new_id()

        core_subset = CoreSubsets()
        for (x, y, p, label) in progress.over(dsg_targets):
            core_subset.add_processor(x, y, p)

            dse_data_struct_address = transceiver.malloc_sdram(
                x, y, DSE_DATA_STRUCT_SIZE, dse_app_id)

            data_spec_file_path = dsg_targets[x, y, p, label]
            data_spec_file_size = os.path.getsize(data_spec_file_path)

            base_address = transceiver.malloc_sdram(x, y, data_spec_file_size,
                                                    dse_app_id)

            dse_data_struct_data = struct.pack("<4I", base_address,
                                               data_spec_file_size, app_id,
                                               write_memory_map_report)

            transceiver.write_memory(x, y, dse_data_struct_address,
                                     dse_data_struct_data,
                                     len(dse_data_struct_data))

            transceiver.write_memory(x,
                                     y,
                                     base_address,
                                     data_spec_file_path,
                                     is_filename=True)

            # data spec file is written at specific address (base_address)
            # this is encapsulated in a structure with four fields:
            # 1 - data specification base address
            # 2 - data specification file size
            # 3 - future application ID
            # 4 - store data for memory map report (True / False)
            # If the memory map report is going to be produced, the
            # address of the structure is returned in user1
            user_0_address = transceiver.\
                get_user_0_register_address_from_core(x, y, p)

            transceiver.write_memory(x, y, user_0_address,
                                     dse_data_struct_address, 4)

        # Execute the DSE on all the cores
        logger.info("Loading the Data Specification Executor")
        dse_exec = os.path.join(os.path.dirname(data_spec_sender),
                                'data_specification_executor.aplx')
        transceiver.execute_flood(core_subset,
                                  dse_exec,
                                  app_id,
                                  is_filename=True)

        logger.info(
            "Waiting for On-chip Data Specification Executor to complete")
        transceiver.wait_for_cores_to_be_in_state(core_subset, app_id,
                                                  [CPUState.FINISHED])

        transceiver.stop_application(dse_app_id)
        transceiver.app_id_tracker.free_id(dse_app_id)
        logger.info("On-chip Data Specification Executor completed")

        return True
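
The on-chip executor above is driven by a small four-word structure written to SDRAM. The sketch below packs an equivalent structure with invented values, just to make the "<4I" layout (spec base address, spec size, application ID, report flag) concrete; it assumes DSE_DATA_STRUCT_SIZE is the 16 bytes this produces.

import struct

base_address = 0x60000000          # invented SDRAM address of the spec
data_spec_file_size = 4096         # invented spec size in bytes
app_id = 30                        # invented application ID
write_memory_map_report = True

dse_data_struct_data = struct.pack(
    "<4I", base_address, data_spec_file_size, app_id,
    int(write_memory_map_report))
assert len(dse_data_struct_data) == 16   # four little-endian 32-bit words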
Example No. 45
    def __call__(
            self, routing_tables, transceiver, machine, app_id,
            provenance_file_path, machine_graph, placements, executable_finder,
            read_algorithm_iobuf, produce_report, default_report_folder,
            target_length, routing_infos, time_to_try_for_each_iteration,
            use_timer_cut_off, machine_time_step, time_scale_factor,
            threshold_percentage, executable_targets,
            compress_as_much_as_possible=False, provenance_data_objects=None):
        """ entrance for routing table compression with bit field

        :param ~.MulticastRoutingTables routing_tables:
        :param ~.Transceiver transceiver:
        :param ~.Machine machine:
        :param int app_id:
        :param str provenance_file_path:
        :param ~.MachineGraph machine_graph:
        :param ~.Placements placements:
        :param ~.ExecutableFinder executable_finder:
        :param bool read_algorithm_iobuf:
        :param bool produce_report:
        :param str default_report_folder:
        :param bool use_timer_cut_off:
        :param int machine_time_step:
        :param int time_scale_factor:
        :param int threshold_percentage:
        :param ExecutableTargets executable_targets:
        :param bool compress_as_much_as_possible:
        :param list(ProvenanceDataItem) provenance_data_objects:
        :rtype: tuple(ExecutableTargets,list(ProvenanceDataItem))
        """

        # build provenance data objects
        if provenance_data_objects is not None:
            prov_items = provenance_data_objects
        else:
            prov_items = list()

        if len(routing_tables.routing_tables) == 0:
            return ExecutableTargets(), prov_items

        # new app id for this simulation
        routing_table_compressor_app_id = \
            transceiver.app_id_tracker.get_new_id()

        progress_bar = ProgressBar(
            total_number_of_things_to_do=(
                len(machine_graph.vertices) +
                (len(routing_tables.routing_tables) *
                 self.TIMES_CYCLED_ROUTING_TABLES)),
            string_describing_what_being_progressed=self._PROGRESS_BAR_TEXT)

        # locate data and on_chip_cores to load binary on
        (addresses, matrix_addresses_and_size) = self._generate_addresses(
            machine_graph, placements, transceiver, progress_bar)

        # create executable targets
        (compressor_executable_targets, bit_field_sorter_executable_path,
         bit_field_compressor_executable_path) = self._generate_core_subsets(
            routing_tables, executable_finder, machine, progress_bar,
            executable_targets)

        # load data into SDRAM
        on_host_chips = self._load_data(
            addresses, transceiver, routing_table_compressor_app_id,
            routing_tables, app_id, machine,
            compress_as_much_as_possible, progress_bar,
            compressor_executable_targets,
            matrix_addresses_and_size, time_to_try_for_each_iteration,
            bit_field_compressor_executable_path,
            bit_field_sorter_executable_path, threshold_percentage)

        # load and run binaries
        system_control_logic.run_system_application(
            compressor_executable_targets,
            routing_table_compressor_app_id, transceiver,
            provenance_file_path, executable_finder,
            read_algorithm_iobuf,
            functools.partial(
                self._check_bit_field_router_compressor_for_success,
                host_chips=on_host_chips,
                sorter_binary_path=bit_field_sorter_executable_path,
                prov_data_items=prov_items),
            [CPUState.FINISHED], True,
            "bit_field_compressor_on_{}_{}_{}.txt",
            [bit_field_sorter_executable_path], progress_bar)

        # start the host side compressions if needed
        if len(on_host_chips) != 0:
            logger.warning(self._ON_HOST_WARNING_MESSAGE, len(on_host_chips))
            progress_bar = ProgressBar(
                total_number_of_things_to_do=len(on_host_chips),
                string_describing_what_being_progressed=self._HOST_BAR_TEXT)
            host_compressor = HostBasedBitFieldRouterCompressor()
            compressed_pacman_router_tables = MulticastRoutingTables()

            key_atom_map = host_compressor.generate_key_to_atom_map(
                machine_graph, routing_infos)

            for (chip_x, chip_y) in progress_bar.over(on_host_chips, False):
                bit_field_sdram_base_addresses = defaultdict(dict)
                host_compressor.collect_bit_field_sdram_base_addresses(
                    chip_x, chip_y, machine, placements, transceiver,
                    bit_field_sdram_base_addresses)

                host_compressor.start_compression_selection_process(
                    router_table=routing_tables.get_routing_table_for_chip(
                        chip_x, chip_y),
                    produce_report=produce_report,
                    report_folder_path=host_compressor.generate_report_path(
                        default_report_folder),
                    bit_field_sdram_base_addresses=(
                        bit_field_sdram_base_addresses),
                    transceiver=transceiver, machine_graph=machine_graph,
                    placements=placements, machine=machine,
                    target_length=target_length,
                    time_to_try_for_each_iteration=(
                        time_to_try_for_each_iteration),
                    use_timer_cut_off=use_timer_cut_off,
                    compressed_pacman_router_tables=(
                        compressed_pacman_router_tables),
                    key_atom_map=key_atom_map)

            # load host compressed routing tables
            for table in compressed_pacman_router_tables.routing_tables:
                if (not machine.get_chip_at(table.x, table.y).virtual
                        and table.multicast_routing_entries):
                    transceiver.clear_multicast_routes(table.x, table.y)
                    transceiver.load_multicast_routes(
                        table.x, table.y, table.multicast_routing_entries,
                        app_id=app_id)

            progress_bar.end()

        return compressor_executable_targets, prov_items
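
The success-check callback above is bound with functools.partial so that run_system_application only has to supply the runtime argument later. A minimal sketch of that binding pattern, with invented names and values:

import functools


def check_success(core, host_chips, sorter_binary_path, prov_data_items):
    # invented checker: a chip "succeeded" if it was not pushed to the host
    return core not in host_chips


callback = functools.partial(
    check_success,
    host_chips={(0, 1)},
    sorter_binary_path="bit_field_sorter.aplx",   # invented binary name
    prov_data_items=[])
print(callback((0, 0)))   # -> True, chip (0, 0) was compressed on machine
print(callback((0, 1)))   # -> False, chip (0, 1) fell back to the host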
    def __call__(self, transceiver, placements, hostname, report_directory,
                 write_text_specs):
        """
        :param ~.Transceiver transceiver:
        :param ~.Placements placements:
        :param str hostname:
        :param str report_directory:
        :param bool write_text_specs:
        """
        # pylint: disable=too-many-arguments, attribute-defined-outside-init
        self._txrx = transceiver
        self._host = hostname
        self._write_text = write_text_specs

        # build file paths for reloaded stuff
        app_data_dir = generate_unique_folder_name(report_directory,
                                                   "reloaded_data_regions", "")
        if not os.path.exists(app_data_dir):
            os.makedirs(app_data_dir)
        self._data_dir = app_data_dir

        report_dir = None
        if write_text_specs:
            report_dir = generate_unique_folder_name(report_directory,
                                                     "reloaded_data_regions",
                                                     "")
            if not os.path.exists(report_dir):
                os.makedirs(report_dir)
        self._rpt_dir = report_dir

        application_vertices_to_reset = set()

        progress = ProgressBar(placements.n_placements, "Reloading data")
        for placement in progress.over(placements.placements):
            # Try to generate the data spec for the placement
            generated = self._regenerate_data_spec_for_vertices(
                placement, placement.vertex)
            # If the region was regenerated, mark it reloaded
            if generated:
                placement.vertex.mark_regions_reloaded()
                continue

            # If the spec wasn't generated directly, but there is an
            # application vertex, try with that
            app_vertex = placement.vertex.app_vertex
            if app_vertex is not None:
                generated = self._regenerate_data_spec_for_vertices(
                    placement, app_vertex)

                # If the region was regenerated, remember the application
                # vertex for resetting later
                if generated:
                    application_vertices_to_reset.add(app_vertex)

        # Only reset the application vertices here, otherwise only one
        # machine vertex's data per app vertex will be updated
        for app_vertex in application_vertices_to_reset:
            app_vertex.mark_regions_reloaded()

        # App data directory can be removed as it should be empty
        os.rmdir(app_data_dir)
Example No. 47
    def get_spikes(self, label, buffer_manager, region, placements,
                   graph_mapper, application_vertex, machine_time_step):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = machine_time_step / 1000.0

        vertices = graph_mapper.get_machine_vertices(application_vertex)
        missing_str = ""
        progress = ProgressBar(vertices, "Getting spikes for {}".format(label))
        for vertex in progress.over(vertices):
            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            if self._indexes[SPIKES] is None:
                neurons_recording = vertex_slice.n_atoms
            else:
                neurons_recording = sum((index >= vertex_slice.lo_atom
                                         and index <= vertex_slice.hi_atom)
                                        for index in self._indexes[SPIKES])
                if neurons_recording == 0:
                    continue
                if neurons_recording < vertex_slice.n_atoms:
                    # For spikes the overflow position is also returned
                    neurons_recording += 1
            # Read the spikes
            n_words = int(math.ceil(neurons_recording / 32.0))
            n_bytes = n_words * self.N_BYTES_PER_WORD
            n_words_with_timestamp = n_words + 1

            # for buffering output, info is taken from the buffer manager
            neuron_param_region_data_pointer, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(placement.x,
                                                       placement.y,
                                                       placement.p)
            record_raw = neuron_param_region_data_pointer.read_all()
            raw_data = (numpy.asarray(record_raw, dtype="uint8").view(
                dtype="<i4")).reshape([-1, n_words_with_timestamp])
            if len(raw_data) > 0:
                record_time = raw_data[:, 0] * float(ms_per_tick)
                spikes = raw_data[:, 1:].byteswap().view("uint8")
                bits = numpy.fliplr(
                    numpy.unpackbits(spikes).reshape((-1, 32))).reshape(
                        (-1, n_bytes * 8))
                time_indices, local_indices = numpy.where(bits == 1)
                if self._indexes[SPIKES] is None:
                    indices = local_indices + vertex_slice.lo_atom
                    times = record_time[time_indices].reshape((-1))
                    spike_ids.extend(indices)
                    spike_times.extend(times)
                else:
                    neurons = self._neurons_recording(SPIKES, vertex_slice)
                    n_neurons = len(neurons)
                    for time_indice, local in zip(time_indices, local_indices):
                        if local < n_neurons:
                            spike_ids.append(neurons[local])
                            spike_times.append(record_time[time_indice])

        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))

        if len(spike_ids) == 0:
            return numpy.zeros((0, 2), dtype="float")

        result = numpy.column_stack((spike_ids, spike_times))
        return result[numpy.lexsort((spike_times, spike_ids))]
Example No. 48
    def __call__(self, transceiver, machine, app_id, dsg_targets):
        """

        :param machine: the python representation of the SpiNNaker machine
        :param transceiver: the SpiNNMan instance
        :param app_id: the application ID of the simulation
        :param dsg_targets: map of placement to file path

        :return: map of placement and dsg data, and loaded data flag.
        """
        processor_to_app_data_base_address = dict()

        # create a progress bar for end users
        progress = ProgressBar(
            dsg_targets, "Executing data specifications and loading data")

        for ((x, y, p),
             data_spec_file_path) in progress.over(dsg_targets.iteritems()):

            # build specification reader
            data_spec_file_path = dsg_targets[x, y, p]
            data_spec_reader = FileDataReader(data_spec_file_path)

            # maximum available memory; however, the system updates the
            # available memory independently, so the check on the space
            # available actually happens when memory is allocated
            chip = machine.get_chip_at(x, y)
            memory_available = chip.sdram.size

            # generate data spec executor
            executor = DataSpecificationExecutor(data_spec_reader,
                                                 memory_available)

            # run data spec executor
            try:
                # bytes_used_by_spec, bytes_written_by_spec = \
                executor.execute()
            except DataSpecificationException as e:
                logger.error(
                    "Error executing data specification for {}, {}, {}".format(
                        x, y, p))
                raise e

            bytes_used_by_spec = executor.get_constructed_data_size()

            # allocate memory where the app data is going to be written
            # this raises an exception in case there is not enough
            # SDRAM to allocate
            start_address = transceiver.malloc_sdram(x, y, bytes_used_by_spec,
                                                     app_id)

            # Write the header and pointer table and load it
            header = executor.get_header()
            pointer_table = executor.get_pointer_table(start_address)
            data_to_write = numpy.concatenate(
                (header, pointer_table)).tostring()
            transceiver.write_memory(x, y, start_address, data_to_write)
            bytes_written_by_spec = len(data_to_write)

            # Write each region
            for region_id in range(constants.MAX_MEM_REGIONS):
                region = executor.get_region(region_id)
                if region is not None:

                    max_pointer = region.max_write_pointer
                    if not region.unfilled and max_pointer > 0:

                        # Get the data up to what has been written
                        data = region.region_data[:max_pointer]

                        # Write the data to the position
                        position = pointer_table[region_id]
                        transceiver.write_memory(x, y, position, data)
                        bytes_written_by_spec += len(data)

            # set user 0 register appropriately to the application data
            user_0_address = \
                transceiver.get_user_0_register_address_from_core(x, y, p)
            start_address_encoded = \
                buffer(struct.pack("<I", start_address))
            transceiver.write_memory(x, y, user_0_address,
                                     start_address_encoded)

            # write information for the memory map report
            processor_to_app_data_base_address[x, y, p] = {
                'start_address': start_address,
                'memory_used': bytes_used_by_spec,
                'memory_written': bytes_written_by_spec
            }

        return processor_to_app_data_base_address, True
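
Two write patterns appear above: the header and pointer table are concatenated into one byte blob for a single write_memory call, and the user-0 register receives the table's start address as one little-endian word. The sketch below reproduces both with invented values (the original uses the Python 2 buffer() and tostring() calls; bytes and tobytes() are the Python 3 equivalents).

import struct

import numpy

header = numpy.array([0xAD130AD6, 1], dtype="uint32")    # example header words
pointer_table = numpy.array([0x61000100, 0x61000200], dtype="uint32")
data_to_write = numpy.concatenate((header, pointer_table)).tobytes()

start_address = 0x61000000                               # invented base address
start_address_encoded = struct.pack("<I", start_address)

assert len(data_to_write) == 16        # 4 words of 4 bytes each
assert len(start_address_encoded) == 4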
    def __call__(
            self, routing_tables, transceiver, machine, app_id,
            provenance_file_path, compress_only_when_needed=True,
            compress_as_much_as_possible=False):
        """
        :param routing_tables: the memory routing tables to be compressed
        :param transceiver: the SpiNNMan interface
        :param machine: the SpiNNaker machine representation
        :param app_id: the application ID used by the main application
        :param provenance_file_path: the path to where to write the data
        :return: flag stating routing compression and loading has been done
        """
        # pylint: disable=too-many-arguments

        # build progress bar
        progress = ProgressBar(
            len(routing_tables.routing_tables) + 2,
            "Running routing table compression on chip")
        compressor_app_id = transceiver.app_id_tracker.get_new_id()
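        # the compressor binary runs under its own application ID so that it
        # can be stopped and its cores freed without touching the main app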

        # load each routing table into SDRAM on its chip, ready for the
        # on-chip compressor to pick up
        for routing_table in progress.over(routing_tables, False):
            self._load_routing_table(
                routing_table, transceiver, app_id, compressor_app_id,
                compress_only_when_needed, compress_as_much_as_possible)

        # load the router compressor executable
        executable_targets = self._load_executables(
            routing_tables, compressor_app_id, transceiver, machine)

        # update progress bar
        progress.update()

        # Wait for the executable to finish
        succeeded = False
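        # if the wait raises (cores crashed or timed out), the finally block
        # below still collects the debug data before the error propagates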
        try:
            transceiver.wait_for_cores_to_be_in_state(
                executable_targets.all_core_subsets, compressor_app_id,
                [CPUState.FINISHED])
            succeeded = True
        finally:
            # get the debug data
            if not succeeded:
                self._handle_failure(
                    executable_targets, transceiver, provenance_file_path,
                    compressor_app_id)

        # Check if any cores have not completed successfully
        self._check_for_success(
            executable_targets, transceiver,
            provenance_file_path, compressor_app_id)

        # update progress bar
        progress.update()

        # stop anything that's associated with the compressor binary
        transceiver.stop_application(compressor_app_id)
        transceiver.app_id_tracker.free_id(compressor_app_id)

        # update the progress bar
        progress.end()
    def __call__(self, machine_graph, n_keys_map, routing_tables):
        """
        :param MachineGraph machine_graph:
        :param AbstractMachinePartitionNKeysMap n_keys_map:
        :param MulticastRoutingTableByPartition routing_tables:
        :rtype: RoutingInfo
        """
        # check that this algorithm supports the constraints
        check_algorithm_can_support_constraints(
            constrained_vertices=machine_graph.outgoing_edge_partitions,
            supported_constraints=[
                FixedMaskConstraint, FixedKeyAndMaskConstraint,
                ContiguousKeyRangeContraint
            ],
            abstract_constraint_type=AbstractKeyAllocatorConstraint)

        # verify that no edge has more than one of each type of constraint,
        # and that the constraints are compatible
        check_types_of_edge_constraint(machine_graph)

        routing_infos = RoutingInfo()

        # Get the edges grouped by those that require the same key
        (fixed_keys, _shared_keys, fixed_masks, fixed_fields, continuous,
         noncontinuous) = get_mulitcast_edge_groups(machine_graph)

        # groups without a contiguous-key requirement are simply allocated
        # contiguous keys anyway, so treat them the same
        continuous.extend(noncontinuous)

        # Go through the groups and allocate keys
        progress = ProgressBar(machine_graph.n_outgoing_edge_partitions,
                               "Allocating routing keys")

        # allocate the groups that have fixed keys
        for group in progress.over(fixed_keys, False):
            # get the fixed keys and masks from the group's constraint
            fixed_mask = None
            fixed_key_and_mask_constraint = locate_constraints_of_type(
                group.constraints, FixedKeyAndMaskConstraint)[0]

            # attempt to allocate them
            self._allocate_fixed_keys_and_masks(
                fixed_key_and_mask_constraint.keys_and_masks, fixed_mask)

            # update the pacman data objects
            self._update_routing_objects(
                fixed_key_and_mask_constraint.keys_and_masks, routing_infos,
                group)
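            # this group now has its keys, so drop it from the pool that the
            # route-sharing pass below will allocate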
            continuous.remove(group)

        for group in progress.over(fixed_masks, False):
            # get mask and fields if need be
            fixed_mask = locate_constraints_of_type(
                group.constraints, FixedMaskConstraint)[0].mask

            fields = None
            if group in fixed_fields:
                fields = locate_constraints_of_type(
                    group.constraints, FixedKeyFieldConstraint)[0].fields
                fixed_fields.remove(group)

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                fixed_mask, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        for group in progress.over(fixed_fields, False):
            fields = locate_constraints_of_type(
                group.constraints, FixedKeyFieldConstraint)[0].fields

            # try to allocate
            keys_and_masks = self._allocate_keys_and_masks(
                None, fields, n_keys_map.n_keys_for_partition(group))

            # update the pacman data objects
            self._update_routing_objects(keys_and_masks, routing_infos, group)
            continuous.remove(group)

        # Sort the rest of the groups, using the routing tables for guidance
        # Group partitions by those which share routes in any table
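        # partition_groups maps each partition to the (shared, mutable) set
        # of partitions it has been found to share a route with so far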
        partition_groups = OrderedDict()
        routers = reversed(
            sorted(
                routing_tables.get_routers(),
                key=lambda item: len(
                    routing_tables.get_entries_for_router(item[0], item[1]))))
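        # routers are visited in descending order of how many routing
        # entries they carry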
        for x, y in routers:

            # Find all partitions that share a route in this table
            partitions_by_route = defaultdict(OrderedSet)
            routing_table = routing_tables.get_entries_for_router(x, y)
            for partition, entry in iteritems(routing_table):
                if partition in continuous:
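                    # encode the route as one integer: a bit per link
                    # (bits 0-5, a SpiNNaker router has six links) and a
                    # bit per target core above that, so two partitions
                    # hash equally exactly when they share a route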
                    entry_hash = sum(1 << i for i in entry.link_ids)
                    entry_hash += sum(1 << (i + 6)
                                      for i in entry.processor_ids)
                    partitions_by_route[entry_hash].add(partition)

            for entry_hash, partitions in iteritems(partitions_by_route):
                found_groups = list()
                for partition in partitions:
                    if partition in partition_groups:
                        found_groups.append(partition_groups[partition])

                if not found_groups:
                    # If no group was found, create a new one
                    for partition in partitions:
                        partition_groups[partition] = partitions

                elif len(found_groups) == 1:
                    # If a single other group was found, merge it
                    for partition in partitions:
                        found_groups[0].add(partition)
                        partition_groups[partition] = found_groups[0]

                else:
                    # Merge the groups
                    new_group = partitions
                    for group in found_groups:
                        for partition in group:
                            new_group.add(partition)
                    for partition in new_group:
                        partition_groups[partition] = new_group

        # Collect the unique groups, ready to be allocated largest-first below
        continuous = list(
            OrderedSet(tuple(group) for group in itervalues(partition_groups)))

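        # allocate the largest groups first; partitions that share a route
        # get adjacent key ranges, which helps later routing table compression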
        for group in reversed(sorted(continuous, key=len)):
            for partition in progress.over(group, False):
                keys_and_masks = self._allocate_keys_and_masks(
                    None, None, n_keys_map.n_keys_for_partition(partition))

                # update the pacman data objects
                self._update_routing_objects(keys_and_masks, routing_infos,
                                             partition)

        progress.end()
        return routing_infos