Code Example #1
File: v_recorder.py  Project: SpikeFrame/sPyNNaker
    def get_v(self, label, buffer_manager, region, state_region, placements,
              graph_mapper, partitionable_vertex):

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        ms_per_tick = self._machine_time_step / 1000.0

        data = list()
        missing_str = ""

        progress_bar = \
            ProgressBar(len(subvertices),
                        "Getting membrane voltage for {}".format(label))

        for subvertex in subvertices:

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            placement = placements.get_placement_of_subvertex(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # for buffered output, the info is taken from the buffer manager
            neuron_param_region_data_pointer, missing_data =\
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if missing_data:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            record_raw = neuron_param_region_data_pointer.read_all()
            record_length = len(record_raw)
            n_rows = record_length // ((vertex_slice.n_atoms + 1) * 4)
            record = (numpy.asarray(record_raw, dtype="uint8").
                      view(dtype="<i4")).reshape((n_rows,
                                                  (vertex_slice.n_atoms + 1)))
            split_record = numpy.array_split(record, [1, 1], 1)
            record_time = numpy.repeat(
                split_record[0] * float(ms_per_tick), vertex_slice.n_atoms, 1)
            record_ids = numpy.tile(
                numpy.arange(vertex_slice.lo_atom, vertex_slice.hi_atom + 1),
                len(record_time)).reshape((-1, vertex_slice.n_atoms))
            record_membrane_potential = split_record[2] / 32767.0

            part_data = numpy.dstack(
                [record_ids, record_time, record_membrane_potential])
            part_data = numpy.reshape(part_data, [-1, 3])
            data.append(part_data)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing membrane voltage data in region {}"
                " from the following cores: {}".format(
                    label, region, missing_str))
        data = numpy.vstack(data)
        order = numpy.lexsort((data[:, 1], data[:, 0]))
        result = data[order]
        return result
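
Every example in this collection follows the same progress-reporting pattern: build a ProgressBar sized to the number of work items with a user-facing label, call update() once per completed item (or update(amount_to_add=n) for a batch, as in Code Example #14), and call end() when the work is finished. The sketch below distils that pattern; the import path and the work_items list are assumptions for illustration only, since the snippets show the class in use but not its import.

from spinn_utilities.progress_bar import ProgressBar  # assumed import path; it has moved between tool versions

# hypothetical work items standing in for subvertices, placements, regions, etc.
work_items = ["item_a", "item_b", "item_c"]

# size the bar to the amount of work and give it a user-facing label
progress_bar = ProgressBar(len(work_items), "Processing {} items".format(len(work_items)))
for item in work_items:
    # ... the per-item work would go here ...
    progress_bar.update()   # advance the bar by one item
progress_bar.end()          # close the bar once everything is processed
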
Code Example #2
    def __call__(self, partitioned_graph, placements, buffer_manager,
                 ran_token):

        if not ran_token:
            raise exceptions.ConfigurationException(
                "The ran token has not been set")

        # Count the regions to be read
        n_regions_to_read = 0
        for vertex in partitioned_graph.subvertices:
            if isinstance(vertex, AbstractReceiveBuffersToHost):
                n_regions_to_read += len(vertex.get_buffered_regions())

        progress_bar = ProgressBar(n_regions_to_read,
                                   "Extracting buffers from the last run")

        # Read back the regions
        for vertex in partitioned_graph.subvertices:
            if isinstance(vertex, AbstractReceiveBuffersToHost):
                placement = placements.get_placement_of_subvertex(vertex)
                state_region = vertex.get_buffered_state_region()
                for region in vertex.get_buffered_regions():
                    buffer_manager.get_data_for_vertex(placement, region,
                                                       state_region)
                    progress_bar.update()
        progress_bar.end()
Code Example #3
File: v_recorder.py  Project: lmateev/sPyNNaker
    def get_v(self, label, buffer_manager, region, state_region, placements,
              graph_mapper, partitionable_vertex):

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        ms_per_tick = self._machine_time_step / 1000.0

        data = list()
        missing_str = ""

        progress_bar = \
            ProgressBar(len(subvertices),
                        "Getting membrane voltage for {}".format(label))

        for subvertex in subvertices:

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            placement = placements.get_placement_of_subvertex(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # for buffered output, the info is taken from the buffer manager
            neuron_param_region_data_pointer, missing_data =\
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if missing_data:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            record_raw = neuron_param_region_data_pointer.read_all()
            record_length = len(record_raw)
            n_rows = record_length // ((vertex_slice.n_atoms + 1) * 4)
            record = (numpy.asarray(record_raw,
                                    dtype="uint8").view(dtype="<i4")).reshape(
                                        (n_rows, (vertex_slice.n_atoms + 1)))
            split_record = numpy.array_split(record, [1, 1], 1)
            record_time = numpy.repeat(split_record[0] * float(ms_per_tick),
                                       vertex_slice.n_atoms, 1)
            record_ids = numpy.tile(
                numpy.arange(vertex_slice.lo_atom, vertex_slice.hi_atom + 1),
                len(record_time)).reshape((-1, vertex_slice.n_atoms))
            record_membrane_potential = split_record[2] / 32767.0

            part_data = numpy.dstack(
                [record_ids, record_time, record_membrane_potential])
            part_data = numpy.reshape(part_data, [-1, 3])
            data.append(part_data)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing membrane voltage data in region {}"
                " from the following cores: {}".format(label, region,
                                                       missing_str))
        data = numpy.vstack(data)
        order = numpy.lexsort((data[:, 1], data[:, 0]))
        result = data[order]
        return result
Code Example #4
    def __call__(self,
                 machine,
                 partitionable_graph=None,
                 partitioned_graph=None):
        """

        :param partitionable_graph:
        :param partitioned_graph:
        :param machine:
        :return:
        """
        if partitionable_graph is not None:

            # Go through the groups and allocate keys
            progress_bar = ProgressBar(
                (len(partitionable_graph.vertices) + len(list(machine.chips))),
                "Allocating virtual identifiers")
        elif partitioned_graph is not None:

            # Go through the groups and allocate keys
            progress_bar = ProgressBar((len(partitioned_graph.subvertices) +
                                        len(list(machine.chips))),
                                       "Allocating virtual identifiers")
        else:
            progress_bar = ProgressBar(len(list(machine.chips)),
                                       "Allocating virtual identifiers")

        # allocate standard ids for real chips
        for chip in machine.chips:
            expected_chip_id = (chip.x << 8) + chip.y
            self._allocate_elements(expected_chip_id, 1)
            progress_bar.update()

        if partitionable_graph is not None:

            # allocate ids for virtual chips
            for vertex in partitionable_graph.vertices:
                if isinstance(vertex, AbstractVirtualVertex):
                    link = vertex.spinnaker_link_id
                    virtual_x, virtual_y, real_x, real_y, real_link = \
                        self._assign_virtual_chip_info(machine, link)
                    vertex.set_virtual_chip_coordinates(
                        virtual_x, virtual_y, real_x, real_y, real_link)
                progress_bar.update()
            progress_bar.end()
        elif partitioned_graph is not None:

            # allocate ids for virtual chips
            for vertex in partitioned_graph.subvertices:
                if isinstance(vertex, VirtualPartitionedVertex):
                    link = vertex.spinnaker_link_id
                    virtual_x, virtual_y, real_x, real_y, real_link = \
                        self._assign_virtual_chip_info(machine, link)
                    vertex.set_virtual_chip_coordinates(
                        virtual_x, virtual_y, real_x, real_y, real_link)
                progress_bar.update()
            progress_bar.end()

        return {"machine": machine}
Code Example #5
    def __call__(self, report_folder, connection_holder, dsg_targets):
        """ converts synaptic matrix for every application edge.
        """

        # Update the print options to display everything
        import numpy
        print_opts = numpy.get_printoptions()
        numpy.set_printoptions(threshold=numpy.nan)

        if dsg_targets is None:
            raise SynapticConfigurationException(
                "dsg targets should not be none, used as a check for "
                "connection holder data to be generated")

        # generate folder for synaptic reports
        top_level_folder = os.path.join(report_folder,
                                        "synaptic_matrix_reports")
        if not os.path.exists(top_level_folder):
            os.mkdir(top_level_folder)

        # create progress bar
        progress = ProgressBar(len(connection_holder.keys()),
                               "Generating synaptic matrix reports")

        # for each application edge, write matrix in new file
        for application_edge, _ in connection_holder.keys():

            # only write matrices for edges which have matrices
            if isinstance(application_edge, ProjectionApplicationEdge):

                # figure out the new file name
                file_name = os.path.join(
                    top_level_folder,
                    "synaptic_matrix_for_application_edge_{}".format(
                        application_edge.label))

                # open writer
                output = None
                try:
                    output = open(file_name, "w")
                except IOError:
                    logger.error("Generate_placement_reports: Can't open file"
                                 " {} for writing.".format(file_name))

                # write all data for all synapse_information's in same file
                for info in application_edge.synapse_information:
                    this_connection_holder = connection_holder[(
                        application_edge, info)]
                    output.write("{}".format(this_connection_holder))
                output.flush()
                output.close()

            progress.update()
        progress.end()

        # Reset the print options
        numpy.set_printoptions(**print_opts)
Code Example #6
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex,
                   base_key_function):

        results = list()
        missing_str = ""
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))

        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            spike_data = str(raw_spike_data.read_all())
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - base_key_function(subvertex)) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
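
The spike data gathered above are ordered with numpy.lexsort before being returned. lexsort treats its last key as the primary key, so passing (result[:, 1], result[:, 0]) sorts rows by neuron id first and by timestamp second. A tiny standalone illustration of that ordering, using made-up values:

import numpy

# three (neuron_id, timestamp) rows, deliberately out of order
result = numpy.array([[2.0, 5.0],
                      [1.0, 7.0],
                      [1.0, 3.0]])

# the last key (column 0, the neuron id) is the primary sort key
order = numpy.lexsort((result[:, 1], result[:, 0]))
print(result[order])
# rows are now ordered: (1, 3), (1, 7), (2, 5)
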
Code Example #7
    def __call__(
            self, partitioned_graph, user_create_database, tags,
            runtime, machine, time_scale_factor, machine_time_step,
            placements, routing_infos, router_tables, database_directory,
            create_atom_to_event_id_mapping=False, partitionable_graph=None,
            graph_mapper=None):

        self._writer = DatabaseWriter(database_directory)
        self._user_create_database = user_create_database

        # add database generation if requested
        self._needs_database = \
            self._writer.auto_detect_database(partitioned_graph)
        if ((self._user_create_database == "None" and self._needs_database) or
                self._user_create_database == "True"):

            if (partitionable_graph is not None and
                    len(partitionable_graph.vertices) != 0):
                database_progress = ProgressBar(11, "Creating database")
            else:
                database_progress = ProgressBar(10, "Creating database")

            self._writer.add_system_params(
                time_scale_factor, machine_time_step, runtime)
            database_progress.update()
            self._writer.add_machine_objects(machine)
            database_progress.update()
            if (partitionable_graph is not None and
                    len(partitionable_graph.vertices) != 0):
                self._writer.add_partitionable_vertices(partitionable_graph)
                database_progress.update()
            self._writer.add_partitioned_vertices(
                partitioned_graph, graph_mapper, partitionable_graph)
            database_progress.update()
            self._writer.add_placements(placements, partitioned_graph)
            database_progress.update()
            self._writer.add_routing_infos(
                routing_infos, partitioned_graph)
            database_progress.update()
            self._writer.add_routing_tables(router_tables)
            database_progress.update()
            self._writer.add_tags(partitioned_graph, tags)
            database_progress.update()
            if (graph_mapper is not None and
                    partitionable_graph is not None and
                    create_atom_to_event_id_mapping):
                self._writer.create_atom_to_event_id_mapping(
                    graph_mapper=graph_mapper,
                    partitionable_graph=partitionable_graph,
                    partitioned_graph=partitioned_graph,
                    routing_infos=routing_infos)
            database_progress.update()
            database_progress.update()
            database_progress.end()

        return {"database_interface": self,
                "database_file_path": self.database_file_path}
Code Example #8
    def get_spikes(self, label, buffer_manager, region, placements,
                   graph_mapper, application_vertex, base_key_function,
                   machine_time_step):

        results = list()
        missing_str = ""
        ms_per_tick = machine_time_step / 1000.0
        vertices = \
            graph_mapper.get_machine_vertices(application_vertex)
        progress_bar = ProgressBar(len(vertices),
                                   "Getting spikes for {}".format(label))

        for vertex in vertices:

            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            spike_data = str(raw_spike_data.read_all())
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(spike_data,
                                        dtype="<u4",
                                        count=eieio_header.count,
                                        offset=offset)
                neuron_ids = ((keys - base_key_function(vertex)) +
                              vertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
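
The decoding loop above reads the little-endian uint32 spike keys directly out of the raw byte string with numpy.frombuffer and then advances the offset by count * 4 bytes. A small self-contained illustration of that call, with made-up key values:

import struct

import numpy

# pack three little-endian 32-bit keys, as an EIEIO payload would carry them
spike_data = struct.pack("<3I", 100, 101, 102)

keys = numpy.frombuffer(spike_data, dtype="<u4", count=3, offset=0)
print(keys)             # [100 101 102]
print(len(spike_data))  # 12 bytes, i.e. count * 4
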
Code Example #9
    def __call__(self, report_folder, application_graph):
        """

        :param report_folder: the report folder to put figure into
        :param application_graph: the app graph
        :rtype: None
        """
        try:
            import graphviz
        except ImportError:
            raise SpynnakerException(
                "graphviz is required to use this report.  Please install"
                " graphviz if you want to use this report.")

        # create holders for data
        vertex_holders = dict()
        dot_diagram = graphviz.Digraph(
            comment="The graph of the network in graphical form")

        # build progress bar for the vertices, edges, and rendering
        progress_bar = ProgressBar(
            application_graph.n_vertices +
            application_graph.n_outgoing_edge_partitions + 1,
            "generating the graphical representation of the neural network")

        # write vertices into dot diagram
        vertex_counter = 0
        for vertex in application_graph.vertices:
            dot_diagram.node(
                "{}".format(vertex_counter),
                "{} ({} neurons)".format(vertex.label, vertex.n_atoms))
            vertex_holders[vertex] = vertex_counter
            vertex_counter += 1
            progress_bar.update()

        # write edges into dot diagram
        for partition in application_graph.outgoing_edge_partitions:
            for edge in partition.edges:
                source_vertex_id = vertex_holders[edge.pre_vertex]
                dest_vertex_id = vertex_holders[edge.post_vertex]
                if isinstance(edge, ProjectionApplicationEdge):
                    for synapse_info in edge.synapse_information:
                        dot_diagram.edge(
                            "{}".format(source_vertex_id),
                            "{}".format(dest_vertex_id),
                            "{}".format(synapse_info.connector))
                else:
                    dot_diagram.edge(
                        "{}".format(source_vertex_id),
                        "{}".format(dest_vertex_id))
            progress_bar.update()

        # write dot file and generate pdf
        file_to_output = os.path.join(report_folder, "network_graph.gv")
        dot_diagram.render(file_to_output, view=False)
        progress_bar.update()
        progress_bar.end()
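
The report above relies on just four graphviz calls: Digraph() to create the diagram, node() and edge() to populate it, and render() to write the .gv source plus a rendered file. A minimal sketch of those calls with made-up population and connector names, assuming the graphviz Python package is installed:

import graphviz

dot = graphviz.Digraph(comment="Example network in graphical form")
dot.node("0", "pop_input (100 neurons)")
dot.node("1", "pop_output (50 neurons)")
dot.edge("0", "1", "OneToOneConnector")

# writes example_graph.gv and a rendered file alongside it
dot.render("example_graph.gv", view=False)
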
Code Example #10
def router_report_from_paths(
        report_folder, routing_tables, routing_infos, hostname,
        partitioned_graph, placements, machine):
    """ Generates a text file of routing paths

    :param routing_tables:
    :param report_folder:
    :param hostname:
    :param routing_infos:
    :param partitioned_graph:
    :param placements:
    :param machine:
    :return:
    """
    file_name = os.path.join(report_folder, "edge_routing_info.rpt")
    f_routing = None
    try:
        f_routing = open(file_name, "w")
    except IOError:
        logger.error("Generate_routing_reports: Can't open file {} for "
                     "writing.".format(file_name))

    f_routing.write("        Edge Routing Report\n")
    f_routing.write("        ===================\n\n")
    time_date_string = time.strftime("%c")
    f_routing.write("Generated: {}".format(time_date_string))
    f_routing.write(" for target machine '{}'".format(hostname))
    f_routing.write("\n\n")

    progress_bar = ProgressBar(len(partitioned_graph.subedges),
                               "Generating Routing path report")
    for edge in partitioned_graph.subedges:
        source_placement = placements.get_placement_of_subvertex(
            edge.pre_subvertex)
        destination_placement = placements.get_placement_of_subvertex(
            edge.post_subvertex)
        partition = partitioned_graph.get_partition_of_subedge(edge)
        key_and_mask = routing_infos.get_keys_and_masks_from_partition(
            partition)[0]
        path, number_of_entries = _search_route(
            source_placement, destination_placement, key_and_mask,
            routing_tables, machine)
        text = "**** SubEdge '{}', from vertex: '{}' to vertex: '{}'".format(
            edge.label, edge.pre_subvertex.label, edge.post_subvertex.label)
        text += " Takes path \n {}".format(path)
        f_routing.write(text)
        f_routing.write("\n")
        text = "Route length: {}\n".format(number_of_entries)
        f_routing.write(text)

        # End one entry:
        f_routing.write("\n")
        progress_bar.update()
    f_routing.flush()
    f_routing.close()
    progress_bar.end()
Code Example #11
    def _run_for_core_subsets(self, core_subsets, transceiver):
        progress_bar = ProgressBar(len(core_subsets), "Extracting IOBUF")
        error_entries = list()
        warn_entries = list()
        io_buffers = list(transceiver.get_iobuf(core_subsets))
        for io_buffer in io_buffers:
            self._check_iobuf_for_error(io_buffer, error_entries, warn_entries)
            progress_bar.update()
        progress_bar.end()
        return io_buffers, error_entries, warn_entries
Code Example #12
def partitioner_report(report_folder, hostname, graph, graph_mapper):
    """ Generate report on the placement of sub-vertices onto cores.
    """

    # Cycle through all vertices, and for each cycle through its sub-vertices.
    # For each sub-vertex, describe its core mapping.
    file_name = os.path.join(report_folder, "partitioned_by_vertex.rpt")
    f_place_by_vertex = None
    try:
        f_place_by_vertex = open(file_name, "w")
    except IOError:
        logger.error("Generate_placement_reports: Can't open file {} for"
                     " writing.".format(file_name))

    f_place_by_vertex.write(
        "        Placement Information by Vertex\n")
    f_place_by_vertex.write("        ===============================\n\n")
    time_date_string = time.strftime("%c")
    f_place_by_vertex.write("Generated: {}".format(time_date_string))
    f_place_by_vertex.write(" for target machine '{}'".format(hostname))
    f_place_by_vertex.write("\n\n")

    vertices = sorted(graph.vertices, key=lambda x: x.label)
    progress_bar = ProgressBar(len(vertices),
                               "Generating partitioner report")
    for v in vertices:
        vertex_name = v.label
        vertex_model = v.model_name
        num_atoms = v.n_atoms
        f_place_by_vertex.write(
            "**** Vertex: '{}'\n".format(vertex_name))
        f_place_by_vertex.write("Model: {}\n".format(vertex_model))
        f_place_by_vertex.write("Pop size: {}\n".format(num_atoms))
        f_place_by_vertex.write("Sub-vertices: \n")

        partitioned_vertices = \
            sorted(graph_mapper.get_subvertices_from_vertex(v),
                   key=lambda x: x.label)
        partitioned_vertices = \
            sorted(partitioned_vertices,
                   key=lambda x: graph_mapper.get_subvertex_slice(x).lo_atom)
        for sv in partitioned_vertices:
            lo_atom = graph_mapper.get_subvertex_slice(sv).lo_atom
            hi_atom = graph_mapper.get_subvertex_slice(sv).hi_atom
            num_atoms = hi_atom - lo_atom + 1
            my_string = "  Slice {}:{} ({} atoms) \n"\
                        .format(lo_atom, hi_atom, num_atoms)
            f_place_by_vertex.write(my_string)
            f_place_by_vertex.flush()
        f_place_by_vertex.write("\n")
        progress_bar.update()

    # Close file:
    f_place_by_vertex.close()
    progress_bar.end()
Code Example #13
    def __call__(self, subgraph, graph_mapper):
        """
        :param subgraph: the subgraph whose edges are to be filtered
        :param graph_mapper: the graph mapper between partitionable and \
                partitioned graphs.
        :return: a new graph mapper and partitioned graph
        """
        new_sub_graph = PartitionedGraph(label=subgraph.label)
        new_graph_mapper = GraphMapper(graph_mapper.first_graph_label,
                                       subgraph.label)

        # create progress bar
        progress_bar = ProgressBar(
            len(subgraph.subvertices) + len(subgraph.subedges),
            "Filtering edges")

        # add the subverts directly, as they won't be pruned.
        for subvert in subgraph.subvertices:
            new_sub_graph.add_subvertex(subvert)
            associated_vertex = graph_mapper.get_vertex_from_subvertex(subvert)
            vertex_slice = graph_mapper.get_subvertex_slice(subvert)
            new_graph_mapper.add_subvertex(subvertex=subvert,
                                           vertex_slice=vertex_slice,
                                           vertex=associated_vertex)
            progress_bar.update()

        # start checking subedges to decide which ones need pruning....
        for subvert in subgraph.subvertices:
            out_going_partitions = \
                subgraph.outgoing_edges_partitions_from_vertex(subvert)
            for partitioner_identifier in out_going_partitions:
                for subedge in \
                        out_going_partitions[partitioner_identifier].edges:
                    if not self._is_filterable(subedge, graph_mapper):
                        logger.debug(
                            "this subedge was not pruned {}".format(subedge))
                        new_sub_graph.add_subedge(subedge,
                                                  partitioner_identifier)
                        associated_edge = graph_mapper.\
                            get_partitionable_edge_from_partitioned_edge(
                                subedge)
                        new_graph_mapper.add_partitioned_edge(
                            subedge, associated_edge)
                    else:
                        logger.debug(
                            "this subedge was pruned {}".format(subedge))
                    progress_bar.update()
        progress_bar.end()

        # return the pruned partitioned_graph and graph_mapper
        return {
            'new_sub_graph': new_sub_graph,
            'new_graph_mapper': new_graph_mapper
        }
Code Example #14
    def __call__(self, executable_targets, app_id, transceiver,
                 loaded_application_data_token):
        """ Go through the executable targets and load each binary to \
            everywhere and then send a start request to the cores that \
            actually use it
        """

        if not loaded_application_data_token:
            raise exceptions.ConfigurationException(
                "The token for having loaded the application data token is set"
                " to false and therefore I cannot run. Please fix and try "
                "again")

        progress_bar = ProgressBar(executable_targets.total_processors,
                                   "Loading executables onto the machine")
        for executable_target_key in executable_targets.binaries:
            file_reader = FileDataReader(executable_target_key)
            core_subset = executable_targets.get_cores_for_binary(
                executable_target_key)

            statinfo = os.stat(executable_target_key)
            size = statinfo.st_size

            # TODO there is a need to parse the binary and see if its
            # ITCM and DTCM requirements are within acceptable params for
            # operating on spinnaker. Currently there are just a few safety
            # checks, which may not be accurate enough.
            if size > constants.MAX_SAFE_BINARY_SIZE:
                logger.warn(
                    "The size of {} is large enough that its"
                    " possible that the binary may be larger than what is"
                    " supported by spinnaker currently. Please reduce the"
                    " binary size if it starts to behave strangely, or goes"
                    " into the WDOG state before starting.".format(
                        executable_target_key))
                if size > constants.MAX_POSSIBLE_BINARY_SIZE:
                    raise exceptions.ConfigurationException(
                        "The size of {} is too large and therefore"
                        " will very likely cause a WDOG state. Until a more"
                        " precise measurement of ITCM and DTCM can be produced"
                        " this is deemed as an error state. Please reduce the"
                        " size of your binary or circumvent this error check.".
                        format(executable_target_key))

            transceiver.execute_flood(core_subset, file_reader, app_id, size)

            actual_cores_loaded = 0
            for chip_based in core_subset.core_subsets:
                for _ in chip_based.processor_ids:
                    actual_cores_loaded += 1
            progress_bar.update(amount_to_add=actual_cores_loaded)
        progress_bar.end()

        return {"LoadBinariesToken": True}
Code Example #15
    def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                       placements, transceiver, routing_infos):
        """ Get synaptic data for all connections in this Projection from the\
            machine.
        """
        if self._stored_synaptic_data_from_machine is None:
            timer = None
            if conf.config.getboolean("Reports", "display_algorithm_timings"):
                timer = Timer()
                timer.start_timing()

            subedges = \
                graph_mapper.get_partitioned_edges_from_partitionable_edge(
                    self)
            if subedges is None:
                subedges = list()

            synaptic_list = copy.copy(self._synapse_list)
            synaptic_list_rows = synaptic_list.get_rows()
            progress_bar = ProgressBar(
                len(subedges),
                "Reading back synaptic matrix for edge between"
                " {} and {}".format(self._pre_vertex.label,
                                    self._post_vertex.label))
            for subedge in subedges:
                n_rows = subedge.get_n_rows(graph_mapper)
                pre_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
                post_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.post_subvertex)

                sub_edge_post_vertex = \
                    graph_mapper.get_vertex_from_subvertex(
                        subedge.post_subvertex)
                rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                    placements, transceiver, subedge.pre_subvertex, n_rows,
                    subedge.post_subvertex,
                    self._synapse_row_io, partitioned_graph,
                    routing_infos, subedge.weight_scales).get_rows()

                for i in range(len(rows)):
                    synaptic_list_rows[
                        i + pre_vertex_slice.lo_atom].set_slice_values(
                            rows[i], vertex_slice=post_vertex_slice)
                progress_bar.update()
            progress_bar.end()
            self._stored_synaptic_data_from_machine = synaptic_list
            if conf.config.getboolean("Reports", "display_algorithm_timings"):
                logger.info("Time to read matrix: {}".format(
                    timer.take_sample()))

        return self._stored_synaptic_data_from_machine
Code Example #16
    def _do_allocation(self, ordered_subverts, placements, machine):

        # Iterate over subvertices and generate placements
        progress_bar = ProgressBar(len(ordered_subverts),
                                   "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))

        # iterate over subverts
        for subvertex_list in ordered_subverts:

            # if too many one to ones to fit on a chip, allocate individually
            if len(subvertex_list) > self.MAX_CORES_PER_CHIP_TO_CONSIDER:
                for subvertex in subvertex_list:
                    self._allocate_individual(subvertex, placements,
                                              progress_bar, resource_tracker)
            else:  # can allocate in one block

                # merge constraints
                placement_constraint, ip_tag_constraints, \
                    reverse_ip_tag_constraints = \
                    self._merge_constraints(subvertex_list)
                # find the most free cores available on any chip that
                # satisfies the constraints
                max_size_on_a_chip = resource_tracker.\
                    max_available_cores_on_chips_that_satisfy(
                        placement_constraint, ip_tag_constraints,
                        reverse_ip_tag_constraints)

                # if size fits block allocate, otherwise allocate individually
                if max_size_on_a_chip < len(subvertex_list):

                    # collect resource requirement
                    resources = list()
                    for subvert in subvertex_list:
                        resources.append(subvert.resources_required)

                    # get cores
                    cores = resource_tracker.allocate_group(
                        resources, placement_constraint, ip_tag_constraints,
                        reverse_ip_tag_constraints)

                    # allocate cores to subverts
                    for subvert, (x, y, p, _, _) in zip(subvertex_list, cores):
                        placement = Placement(subvert, x, y, p)
                        placements.add_placement(placement)
                        progress_bar.update()
                else:
                    for subvertex in subvertex_list:
                        self._allocate_individual(subvertex, placements,
                                                  progress_bar,
                                                  resource_tracker)
        progress_bar.end()
Code Example #17
    def _run_for_placements(self, placements, transceiver):
        io_buffers = list()
        error_entries = list()
        warn_entries = list()
        progress_bar = ProgressBar(len(placements), "Extracting IOBUF")
        for placement in placements:
            iobuf = transceiver.get_iobuf_from_core(
                placement.x, placement.y, placement.p)
            io_buffers.append(iobuf)
            self._check_iobuf_for_error(iobuf, error_entries, warn_entries)
            progress_bar.update()
        progress_bar.end()
        return io_buffers, error_entries, warn_entries
Code Example #18
    def _handle_external_algorithm(self, algorithm):
        """ Creates the input files for the algorithm

        :param algorithm: the algorithm
        :return: None
        """
        input_params = self._create_input_commands(algorithm)

        inputs = \
            [a.format(**input_params) for a in algorithm.command_line_args]

        # output debug info in case things go wrong
        logger.debug(
            "The inputs to the external mapping function are {}".format(
                inputs))

        # create progress bar for external algorithm
        algorithm_progress_bar = ProgressBar(
            1, "Running external algorithm {}".format(algorithm.algorithm_id))

        timer = None
        if self._do_timing:
            timer = Timer()
            timer.start_timing()

        # execute other command
        child = subprocess.Popen(inputs,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE)
        child.wait()
        algorithm_progress_bar.end()

        if self._do_timing:
            self._update_timings(timer, algorithm)

        # check the return code for a successful execution
        if child.returncode != 0:
            stdout, stderr = child.communicate()
            raise exceptions.\
                PacmanExternalAlgorithmFailedToCompleteException(
                    "Algorithm {} returned a non-zero error code {}\n"
                    "    Inputs: {}\n"
                    "    Output: {}\n"
                    "    Error: {}\n".format(
                        algorithm.algorithm_id, child.returncode,
                        inputs, stdout, stderr))

        outputs = self._sort_out_external_algorithm_outputs(algorithm)
        self._map_output_parameters(outputs, algorithm)
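
The wrapper above launches the external algorithm with subprocess.Popen, waits for it, and reads stdout/stderr via communicate() when the return code is non-zero. A minimal standalone sketch of that flow with a made-up command; note that communicate() both waits for the child and drains its pipes, so it can replace a separate wait() call when the output may be large:

import subprocess

# hypothetical external command standing in for the algorithm invocation
child = subprocess.Popen(["echo", "hello"],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
stdout, stderr = child.communicate()   # waits for the child and collects its output

if child.returncode != 0:
    raise RuntimeError(
        "command returned a non-zero error code {}\n"
        "    Output: {}\n    Error: {}".format(child.returncode, stdout, stderr))
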
Code Example #19
    def __call__(self, partitioned_graph, partitionable_graph=None,
                 graph_mapper=None):

        # Generate an n_keys map for the graph and add constraints
        n_keys_map = DictBasedPartitionedPartitionNKeysMap()

        # generate progress bar
        progress_bar = ProgressBar(
            len(partitioned_graph.subvertices),
            "Deducing edge to number of keys map")

        # contains a partitionable vertex
        if partitionable_graph is not None and graph_mapper is not None:
            # iterate over each partition in the partitioned graph
            for vertex in partitioned_graph.subvertices:
                partitions = \
                    partitioned_graph.outgoing_edges_partitions_from_vertex(
                        vertex)
                for partition_id in partitions:
                    partition = partitions[partition_id]
                    added_constraints = False
                    constraints = self._process_partitionable_partition(
                        partition, n_keys_map, partition_id, graph_mapper,
                        partitionable_graph)
                    if not added_constraints:
                        partition.add_constraints(constraints)
                    else:
                        self._check_constraints_equal(
                            constraints, partition.constraints)
                progress_bar.update()
            progress_bar.end()
        else:
            for vertex in partitioned_graph.subvertices:
                partitions = \
                    partitioned_graph.outgoing_edges_partitions_from_vertex(
                        vertex)
                for partition_id in partitions:
                    partition = partitions[partition_id]
                    added_constraints = False
                    constraints = self._process_partitioned_partition(
                        partition, n_keys_map, partition_id, partitioned_graph)
                    if not added_constraints:
                        partition.add_constraints(constraints)
                    else:
                        self._check_constraints_equal(
                            constraints, partition.constraints)
                progress_bar.update()
            progress_bar.end()

        return {'n_keys_map': n_keys_map}
Code Example #20
    def load_initial_buffers(self):
        """ Load the initial buffers for the senders using mem writes
        """
        total_data = 0
        for vertex in self._sender_vertices:
            for region in vertex.get_regions():
                total_data += vertex.get_region_buffer_size(region)

        progress_bar = ProgressBar(
            total_data, "Loading buffers ({} bytes)".format(total_data))
        for vertex in self._sender_vertices:
            for region in vertex.get_regions():
                self._send_initial_messages(vertex, region, progress_bar)
        progress_bar.end()
Code Example #21
    def _get_synaptic_data(self, as_list, data_to_get):

        post_vertex = self._projection_edge.post_vertex
        pre_vertex = self._projection_edge.pre_vertex

        # If in virtual board mode, the connection data should be set
        if self._virtual_connection_list is not None:
            post_vertex = self._projection_edge.post_vertex
            pre_vertex = self._projection_edge.pre_vertex
            return ConnectionHolder(data_to_get, as_list, pre_vertex.n_atoms,
                                    post_vertex.n_atoms,
                                    self._virtual_connection_list)

        connection_holder = ConnectionHolder(data_to_get, as_list,
                                             pre_vertex.n_atoms,
                                             post_vertex.n_atoms)

        # If we haven't run, add the holder to get connections, and return it
        if not self._spinnaker.has_ran:

            post_vertex.add_pre_run_connection_holder(
                connection_holder, self._projection_edge,
                self._synapse_information)
            return connection_holder

        # Otherwise, get the connections now
        graph_mapper = self._spinnaker.graph_mapper
        placements = self._spinnaker.placements
        transceiver = self._spinnaker.transceiver
        routing_infos = self._spinnaker.routing_infos
        partitioned_graph = self._spinnaker.partitioned_graph
        subedges = graph_mapper.get_partitioned_edges_from_partitionable_edge(
            self._projection_edge)
        progress = ProgressBar(
            len(subedges),
            "Getting {}s for projection between {} and {}".format(
                data_to_get, pre_vertex.label, post_vertex.label))
        for subedge in subedges:
            placement = placements.get_placement_of_subvertex(
                subedge.post_subvertex)
            connections = post_vertex.get_connections_from_machine(
                transceiver, placement, subedge, graph_mapper, routing_infos,
                self._synapse_information, partitioned_graph)
            if connections is not None:
                connection_holder.add_connections(connections)
            progress.update()
        progress.end()
        connection_holder.finish()
        return connection_holder
Code Example #22
    def __call__(self, machine_graph, graph_mapper):
        """
        :param machine_graph: the machine_graph whose edges are to be filtered
        :param graph_mapper: the graph mapper between graphs
        :return: a new graph mapper and machine graph
        """
        new_machine_graph = MachineGraph(label=machine_graph.label)
        new_graph_mapper = GraphMapper()

        # create progress bar
        progress_bar = ProgressBar(
            machine_graph.n_vertices +
            machine_graph.n_outgoing_edge_partitions, "Filtering edges")

        # add the vertices directly, as they won't be pruned.
        for vertex in machine_graph.vertices:
            new_machine_graph.add_vertex(vertex)
            associated_vertex = graph_mapper.get_application_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)
            new_graph_mapper.add_vertex_mapping(
                machine_vertex=vertex,
                vertex_slice=vertex_slice,
                application_vertex=associated_vertex)
            progress_bar.update()

        # start checking edges to decide which ones need pruning....
        for partition in machine_graph.outgoing_edge_partitions:
            for edge in partition.edges:
                if not self._is_filterable(edge, graph_mapper):
                    logger.debug("this edge was not pruned {}".format(edge))
                    new_machine_graph.add_edge(edge, partition.identifier)
                    app_edge = graph_mapper.get_application_edge(edge)
                    new_graph_mapper.add_edge_mapping(edge, app_edge)

                    # add partition constraints from the original graph to
                    # the new graph
                    # add constraints from the application partition
                    new_machine_graph_partition = new_machine_graph.\
                        get_outgoing_edge_partition_starting_at_vertex(
                            edge.pre_vertex, partition.identifier)
                    new_machine_graph_partition.add_constraints(
                        partition.constraints)
                else:
                    logger.debug("this edge was pruned {}".format(edge))
            progress_bar.update()
        progress_bar.end()

        # return the pruned graph and graph_mapper
        return new_machine_graph, new_graph_mapper
Code Example #23
    def _write_router_provenance_data(self, router_tables, machine, txrx):
        """ Writes the provenance data of the router diagnostics

        :param router_tables: the routing tables generated by pacman
        :param machine: the spinnMachine object
        :param txrx: the transceiver object
        :return: None
        """
        progress = ProgressBar(machine.n_chips, "Getting Router Provenance")

        # acquire diagnostic data
        items = list()
        seen_chips = set()

        for router_table in sorted(
                router_tables.routing_tables,
                key=lambda table: (table.x, table.y)):
            x = router_table.x
            y = router_table.y
            if not machine.get_chip_at(x, y).virtual:
                router_diagnostic = txrx.get_router_diagnostics(x, y)
                seen_chips.add((x, y))
                reinjector_status = txrx.get_reinjection_status(x, y)
                items.extend(self._write_router_diagnostics(
                    x, y, router_diagnostic, reinjector_status, True))
                self._add_totals(router_diagnostic, reinjector_status)
            progress.update()

        for chip in sorted(machine.chips, key=lambda c: (c.x, c.y)):
            if not chip.virtual and (chip.x, chip.y) not in seen_chips:
                try:
                    diagnostic = txrx.get_router_diagnostics(chip.x, chip.y)

                    if (diagnostic.n_dropped_multicast_packets != 0 or
                            diagnostic.n_local_multicast_packets != 0 or
                            diagnostic.n_external_multicast_packets != 0):

                        reinjector_status = txrx.get_reinjection_status(
                            chip.x, chip.y)
                        items.extend(self._write_router_diagnostics(
                            chip.x, chip.y, diagnostic, reinjector_status,
                            False))
                        self._add_totals(diagnostic, reinjector_status)
                        progress.update()
                except Exception:
                    # There could be issues with unused chips - don't worry!
                    pass
        progress.end()
        return items
Code Example #24
    def _get_synaptic_data(self, as_list, data_to_get):

        post_vertex = self._projection_edge.post_vertex
        pre_vertex = self._projection_edge.pre_vertex

        # If in virtual board mode, the connection data should be set
        if self._virtual_connection_list is not None:
            post_vertex = self._projection_edge.post_vertex
            pre_vertex = self._projection_edge.pre_vertex
            return ConnectionHolder(
                data_to_get, as_list, pre_vertex.n_atoms, post_vertex.n_atoms,
                self._virtual_connection_list)

        connection_holder = ConnectionHolder(
            data_to_get, as_list, pre_vertex.n_atoms, post_vertex.n_atoms)

        # If we haven't run, add the holder to get connections, and return it
        if not self._spinnaker.has_ran:

            post_vertex.add_pre_run_connection_holder(
                connection_holder, self._projection_edge,
                self._synapse_information)
            return connection_holder

        # Otherwise, get the connections now
        graph_mapper = self._spinnaker.graph_mapper
        placements = self._spinnaker.placements
        transceiver = self._spinnaker.transceiver
        routing_infos = self._spinnaker.routing_infos
        partitioned_graph = self._spinnaker.partitioned_graph
        subedges = graph_mapper.get_partitioned_edges_from_partitionable_edge(
            self._projection_edge)
        progress = ProgressBar(
            len(subedges),
            "Getting {}s for projection between {} and {}".format(
                data_to_get, pre_vertex.label, post_vertex.label))
        for subedge in subedges:
            placement = placements.get_placement_of_subvertex(
                subedge.post_subvertex)
            connections = post_vertex.get_connections_from_machine(
                transceiver, placement, subedge, graph_mapper, routing_infos,
                self._synapse_information, partitioned_graph)
            if connections is not None:
                connection_holder.add_connections(connections)
            progress.update()
        progress.end()
        connection_holder.finish()
        return connection_holder
Code Example #25
    def __call__(self, partitioned_graph, machine):
        progress_bar = ProgressBar(9, "Placing and Routing")

        vertices_resources, nets, net_names = \
            rig_converters.convert_to_rig_partitioned_graph(
                partitioned_graph)
        progress_bar.update()

        rig_machine = rig_converters.convert_to_rig_machine(machine)
        progress_bar.update()

        rig_constraints = rig_converters.create_rig_machine_constraints(
            machine)
        progress_bar.update()

        rig_constraints.extend(
            rig_converters.create_rig_partitioned_graph_constraints(
                partitioned_graph, rig_machine))
        progress_bar.update()

        rig_placements = place(vertices_resources, nets, rig_machine,
                               rig_constraints)
        progress_bar.update()

        rig_allocations = allocate(vertices_resources, nets, rig_machine,
                                   rig_constraints, rig_placements)
        progress_bar.update()

        rig_routes = route(vertices_resources, nets, rig_machine,
                           rig_constraints, rig_placements, rig_allocations,
                           "cores")
        rig_routes = {
            name: rig_routes[net]
            for net, name in net_names.iteritems()
        }
        progress_bar.update()

        placements = rig_converters.convert_from_rig_placements(
            rig_placements, rig_allocations, partitioned_graph)
        progress_bar.update()
        routes = rig_converters.convert_from_rig_routes(
            rig_routes, partitioned_graph)
        progress_bar.update()
        progress_bar.end()

        return {"placements": placements, "routing_paths": routes}
Code Example #26
    def __call__(
            self, placements, graph_mapper, tags, executable_finder,
            partitioned_graph, partitionable_graph, routing_infos, hostname,
            report_default_directory, write_text_specs,
            app_data_runtime_folder):

        # Keep the results
        executable_targets = ExecutableTargets()
        dsg_targets = dict()

        # Keep delay extensions until the end
        delay_extension_placements = list()

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(placements.placements)),
                                   "Generating sPyNNaker data specifications")
        for placement in placements.placements:
            associated_vertex = graph_mapper.get_vertex_from_subvertex(
                placement.subvertex)

            if isinstance(associated_vertex, DelayExtensionVertex):
                delay_extension_placements.append(
                    (placement, associated_vertex))
            else:
                self._generate_data_spec_for_subvertices(
                    placement, associated_vertex, executable_targets,
                    dsg_targets, graph_mapper, tags, executable_finder,
                    partitioned_graph, partitionable_graph, routing_infos,
                    hostname, report_default_directory, write_text_specs,
                    app_data_runtime_folder)
                progress_bar.update()

        for placement, associated_vertex in delay_extension_placements:
            self._generate_data_spec_for_subvertices(
                placement, associated_vertex, executable_targets,
                dsg_targets, graph_mapper, tags, executable_finder,
                partitioned_graph, partitionable_graph, routing_infos,
                hostname, report_default_directory, write_text_specs,
                app_data_runtime_folder)
            progress_bar.update()

        # finish the progress bar
        progress_bar.end()

        return {'executable_targets': executable_targets,
                'dsg_targets': dsg_targets}
Code Example #27
    def __call__(self,
                 placements,
                 graph,
                 hostname,
                 report_default_directory,
                 write_text_specs,
                 app_data_runtime_folder,
                 machine,
                 graph_mapper=None):

        # Keep the results
        dsg_targets = dict()

        # Keep delay extensions until the end
        delay_extension_placements = list()

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(placements.placements)),
                                   "Generating sPyNNaker data specifications")
        for placement in placements.placements:
            associated_vertex = graph_mapper.get_application_vertex(
                placement.vertex)

            if isinstance(associated_vertex, DelayExtensionVertex):
                delay_extension_placements.append(
                    (placement, associated_vertex))
            else:
                self._generate_data_spec_for_vertices(
                    placement, associated_vertex, dsg_targets, hostname,
                    report_default_directory, write_text_specs,
                    app_data_runtime_folder, machine)
                progress_bar.update()

        for placement, associated_vertex in delay_extension_placements:
            self._generate_data_spec_for_vertices(placement, associated_vertex,
                                                  dsg_targets, hostname,
                                                  report_default_directory,
                                                  write_text_specs,
                                                  app_data_runtime_folder,
                                                  machine)
            progress_bar.update()

        # finish the progress bar
        progress_bar.end()

        return dsg_targets
Code example #28
def generate_sub_edges(subgraph, graph_to_subgraph_mapper, graph):
    """ Generate the sub edges for the subvertices in the graph

    :param subgraph: the partitioned graph to work with
    :type subgraph:\
                :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
    :param graph_to_subgraph_mapper: the mapper between the \
                partitionable graph and the partitioned graph
    :type graph_to_subgraph_mapper:\
                :py:class:`pacman.model.graph_mapper.GraphMapper`
    :param graph: the partitionable graph to work with
    :type graph:\
                :py:class:`pacman.model.graph.partitionable_graph.PartitionableGraph`
    """

    # start progress bar
    progress_bar = ProgressBar(len(subgraph.subvertices),
                               "Partitioning graph edges")

    # Partition edges according to vertex partitioning
    for src_sv in subgraph.subvertices:

        # For each out edge of the parent vertex...
        vertex = graph_to_subgraph_mapper.get_vertex_from_subvertex(src_sv)
        outgoing_partitions = \
            graph.outgoing_edges_partitions_from_vertex(vertex)
        for outgoing_partition_identifier in outgoing_partitions:
            partition = outgoing_partitions[outgoing_partition_identifier]
            out_edges = partition.edges
            partition_constraints = partition.constraints
            for edge in out_edges:

                # and create and store a new subedge for each post-subvertex
                post_vertex = edge.post_vertex
                post_subverts = (graph_to_subgraph_mapper
                                 .get_subvertices_from_vertex(post_vertex))
                for dst_sv in post_subverts:
                    subedge = edge.create_subedge(src_sv, dst_sv)
                    subgraph.add_subedge(subedge,
                                         outgoing_partition_identifier,
                                         partition_constraints)
                    graph_to_subgraph_mapper.add_partitioned_edge(
                        subedge, edge)
        progress_bar.update()
    progress_bar.end()
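
Each application edge is fanned out to one sub-edge per (pre-subvertex,
post-subvertex) pair, so the partitioned graph can grow quadratically in the
number of subvertices. A minimal sketch of the resulting sub-edge count, using
hypothetical split sizes rather than real graph objects:

# (n_pre_subvertices, n_post_subvertices) for two hypothetical
# application edges
app_edges = [(4, 3), (2, 5)]
n_sub_edges = sum(n_pre * n_post for n_pre, n_post in app_edges)
print(n_sub_edges)   # -> 22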
Code example #29
    def __call__(self,
                 partitioned_graph,
                 partitionable_graph=None,
                 provenance_data_objects=None):
        """

        :param partitioned_graph: The partitioned graph to inspect
        :param partitionable_graph: The optional partitionable graph
        :param provenance_data_objects: Any existing objects to append to
        """

        if provenance_data_objects is not None:
            prov_items = provenance_data_objects
        else:
            prov_items = list()

        progress = ProgressBar(
            len(partitioned_graph.subvertices) +
            len(partitioned_graph.subedges),
            "Getting provenance data from partitioned graph")
        for subvertex in partitioned_graph.subvertices:
            if isinstance(subvertex, AbstractProvidesLocalProvenanceData):
                prov_items.extend(subvertex.get_local_provenance_data())
            progress.update()
        for subedge in partitioned_graph.subedges:
            if isinstance(subedge, AbstractProvidesLocalProvenanceData):
                prov_items.extend(subedge.get_local_provenance_data())
            progress.update()
        progress.end()

        if partitionable_graph is not None:
            progress = ProgressBar(
                len(partitionable_graph.vertices) +
                len(partitionable_graph.edges),
                "Getting provenance data from partitionable graph")
            for vertex in partitionable_graph.vertices:
                if isinstance(vertex, AbstractProvidesLocalProvenanceData):
                    prov_items.extend(vertex.get_local_provenance_data())
                progress.update()
            for edge in partitionable_graph.edges:
                if isinstance(edge, AbstractProvidesLocalProvenanceData):
                    prov_items.extend(edge.get_local_provenance_data())
                progress.update()
            progress.end()

        return {'prov_items': prov_items}
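
A subvertex or edge only contributes here if it implements
AbstractProvidesLocalProvenanceData. A duck-typed sketch of such an object
(the class name and returned items are hypothetical; the real implementations
return richer provenance item objects):

class DroppedPacketVertex(object):
    """ Hypothetical subvertex that reports how many packets it dropped """

    def __init__(self):
        self._n_dropped_packets = 0

    def get_local_provenance_data(self):
        # plain (name, value) pairs stand in for the real provenance
        # item objects in this sketch
        return [("n_dropped_packets", self._n_dropped_packets)]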
Code example #30
def generate_comparison_router_report(
        report_folder, routing_tables, compressed_routing_tables):
    """ Make a report on comparison of the compressed and uncompressed \
        routing tables

    :param report_folder: the folder to store the resulting report
    :param routing_tables: the original routing tables
    :param compressed_routing_tables: the compressed routing tables
    :return: None
    """
    file_name = os.path.join(
        report_folder, "comparison_of_compressed_uncompressed_routing_tables")

    output = None
    try:
        output = open(file_name, "w")
    except IOError:
        logger.error("generate_comparison_router_report: can't open file"
                     " {} for writing.".format(file_name))
        return

    progress_bar = ProgressBar(len(routing_tables.routing_tables),
                               "Generating comparison of router table report")

    for uncompressed_table in routing_tables.routing_tables:
        x = uncompressed_table.x
        y = uncompressed_table.y
        compressed_table = compressed_routing_tables.\
            get_routing_table_for_chip(x, y)

        n_entries_un_compressed = uncompressed_table.number_of_entries
        n_entries_compressed = compressed_table.number_of_entries
        percentage = ((float(n_entries_un_compressed - n_entries_compressed)) /
                      float(n_entries_un_compressed)) * 100

        output.write(
            "Uncompressed table at {}:{} has {} entries whereas compressed "
            "table has {} entries. This is a decrease of {} %\n".format(
                x, y, n_entries_un_compressed, n_entries_compressed,
                percentage))
        progress_bar.update()
    progress_bar.end()
    output.flush()
    output.close()
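
The percentage written to the report is the relative reduction in table size,
for example:

# e.g. 1000 uncompressed entries reduced to 250 compressed entries
n_entries_un_compressed, n_entries_compressed = 1000, 250
percentage = (float(n_entries_un_compressed - n_entries_compressed) /
              float(n_entries_un_compressed)) * 100
print(percentage)   # -> 75.0, reported as "a decrease of 75.0 %"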
Code example #31
def router_report_from_router_tables(report_folder, routing_tables):
    """

    :param report_folder:
    :param routing_tables:
    :return:
    """

    top_level_folder = os.path.join(report_folder, "routing_tables_generated")
    if not os.path.exists(top_level_folder):
        os.mkdir(top_level_folder)
    progress_bar = ProgressBar(len(routing_tables.routing_tables),
                               "Generating Router table report")

    for routing_table in routing_tables.routing_tables:
        if routing_table.number_of_entries > 0:
            _generate_routing_table(routing_table, top_level_folder)
        progress_bar.update()
    progress_bar.end()
Code example #32
    def __call__(self, machine_graph, graph_mapper):
        """
        :param machine_graph: the machine_graph whose edges are to be updated
        :param graph_mapper: the graph mapper between graphs
        """

        # create progress bar
        progress_bar = ProgressBar(machine_graph.n_outgoing_edge_partitions,
                                   "Updating edge weights")

        # update the weight of every edge that supports weight updates
        for partition in machine_graph.outgoing_edge_partitions:
            for edge in partition.edges:
                if isinstance(edge, AbstractWeightUpdatable):
                    edge.update_weight(graph_mapper)
            progress_bar.update()
        progress_bar.end()

        # return the updated machine graph
        return machine_graph
Code example #33
    def __call__(self, subgraph, graph_mapper):
        """
        :param subgraph: the subgraph whose edges are to be updated
        :param graph_mapper: the graph mapper between partitionable and \
                partitioned graphs.
        """

        # create progress bar
        progress_bar = ProgressBar(
            len(subgraph.subedges), "Updating edge weights")

        # update the weight of every subedge that supports weight updates
        for subedge in subgraph.subedges:
            if isinstance(subedge, AbstractWeightUpdatable):
                subedge.update_weight(graph_mapper)
            progress_bar.update()
        progress_bar.end()

        # return the updated subgraph
        return {'subgraph': subgraph}
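
Only edges implementing AbstractWeightUpdatable take part in either of the two
passes above. A duck-typed sketch of such a sub-edge (class and attribute names
are hypothetical), where the weight is simply the number of atoms in the
pre-vertex slice:

class SliceWeightedSubEdge(object):
    """ Hypothetical sub-edge whose weight tracks its pre-vertex slice size """

    def __init__(self, pre_subvertex):
        self._pre_subvertex = pre_subvertex
        self.weight = None

    def update_weight(self, graph_mapper):
        # get_subvertex_slice is used the same way in the other examples here
        pre_slice = graph_mapper.get_subvertex_slice(self._pre_subvertex)
        self.weight = pre_slice.n_atoms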
Code example #34
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        missing_str = ""

        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))
        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p
            lo_atom = subvertex_slice.lo_atom

            # Read the spikes
            n_words = int(math.ceil(subvertex_slice.n_atoms / 32.0))
            n_bytes = n_words * 4
            n_words_with_timestamp = n_words + 1

            # for buffered output, the info is taken from the buffer manager
            neuron_param_region_data_pointer, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            record_raw = neuron_param_region_data_pointer.read_all()
            raw_data = (numpy.asarray(record_raw, dtype="uint8").
                        view(dtype="<i4")).reshape(
                [-1, n_words_with_timestamp])
            split_record = numpy.array_split(raw_data, [1, 1], 1)
            record_time = split_record[0] * float(ms_per_tick)
            spikes = split_record[2].byteswap().view("uint8")
            bits = numpy.fliplr(numpy.unpackbits(spikes).reshape(
                (-1, 32))).reshape((-1, n_bytes * 8))
            time_indices, indices = numpy.where(bits == 1)
            times = record_time[time_indices].reshape((-1))
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))

        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
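
The bit-field decode above byte-swaps each little-endian word, unpacks it
MSB-first and then flips each 32-bit row, so that column k corresponds to
neuron lo_atom + k. A self-contained sketch of that decode on a single toy
word:

import numpy

# one 32-bit little-endian word with bits 1 and 3 set, i.e. neurons
# (lo_atom + 1) and (lo_atom + 3) spiked in this timestep
word = numpy.array([0b1010], dtype="<i4")
bits = numpy.fliplr(
    numpy.unpackbits(word.byteswap().view("uint8")).reshape((-1, 32)))
print(numpy.nonzero(bits)[1])   # -> [1 3]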
Code example #35
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        missing_str = ""

        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))
        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p
            lo_atom = subvertex_slice.lo_atom

            # Read the spikes
            n_words = int(math.ceil(subvertex_slice.n_atoms / 32.0))
            n_bytes_per_block = n_words * 4

            # for buffered output, the info is taken from the buffer manager
            neuron_param_region_data_pointer, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            raw_data = neuron_param_region_data_pointer.read_all()
            offset = 0
            while offset < len(raw_data):
                ((time, n_blocks), offset) = (
                    struct.unpack_from("<II", raw_data, offset), offset + 8)
                (spike_data, offset) = (numpy.frombuffer(
                    raw_data, dtype="uint8",
                    count=n_bytes_per_block * n_blocks, offset=offset),
                    offset + (n_bytes_per_block * n_blocks))
                spikes = spike_data.view("<i4").byteswap().view("uint8")
                bits = numpy.fliplr(numpy.unpackbits(spikes).reshape(
                    (-1, 32))).reshape((-1, n_bytes_per_block * 8))
                indices = numpy.nonzero(bits)[1]
                times = numpy.repeat([time * ms_per_tick], len(indices))
                indices = indices + lo_atom
                spike_ids.append(indices)
                spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))

        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
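
This variant reads a stream of variable-length records rather than one
fixed-shape array: each record is an 8-byte header (little-endian timestep and
block count) followed by that many bit-field blocks. A toy sketch of the
layout assumed above (values hypothetical):

import struct
import numpy

n_bytes_per_block = 4                        # one 32-bit word per block
bitfield = numpy.zeros(n_bytes_per_block, dtype="uint8")
bitfield.view("<i4")[0] = 0b1010             # neurons 1 and 3 spike
raw_data = struct.pack("<II", 7, 1) + bitfield.tobytes()

# header: timestep 7, followed by one block of spike bits
print(struct.unpack_from("<II", raw_data, 0))   # -> (7, 1)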