Example #1
    def get_v(self, label, buffer_manager, region, state_region, placements,
              graph_mapper, partitionable_vertex):

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        ms_per_tick = self._machine_time_step / 1000.0

        data = list()
        missing_str = ""

        progress_bar = \
            ProgressBar(len(subvertices),
                        "Getting membrane voltage for {}".format(label))

        for subvertex in subvertices:

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            placement = placements.get_placement_of_subvertex(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # for buffered output, the data is taken from the buffer manager
            neuron_param_region_data_pointer, missing_data =\
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if missing_data:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            record_raw = neuron_param_region_data_pointer.read_all()
            record_length = len(record_raw)
            n_rows = record_length / ((vertex_slice.n_atoms + 1) * 4)
            record = (numpy.asarray(record_raw, dtype="uint8").
                      view(dtype="<i4")).reshape((n_rows,
                                                  (vertex_slice.n_atoms + 1)))
            split_record = numpy.array_split(record, [1, 1], 1)
            record_time = numpy.repeat(
                split_record[0] * float(ms_per_tick), vertex_slice.n_atoms, 1)
            record_ids = numpy.tile(
                numpy.arange(vertex_slice.lo_atom, vertex_slice.hi_atom + 1),
                len(record_time)).reshape((-1, vertex_slice.n_atoms))
            record_membrane_potential = split_record[2] / 32767.0

            part_data = numpy.dstack(
                [record_ids, record_time, record_membrane_potential])
            part_data = numpy.reshape(part_data, [-1, 3])
            data.append(part_data)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing membrane voltage data in region {}"
                " from the following cores: {}".format(
                    label, region, missing_str))
        data = numpy.vstack(data)
        order = numpy.lexsort((data[:, 1], data[:, 0]))
        result = data[order]
        return result
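Every snippet on this page follows the same three-step ProgressBar pattern: construct the bar with the number of work items and a label, call update() once per completed item (or pass amount_to_add for batches), and call end() when the work is done. A minimal sketch of that pattern is shown below; the ProgressBar class here is a hypothetical stand-in that only assumes the constructor/update()/end() interface of the SpiNNaker utility class used by the real examples.

import sys


class ProgressBar(object):
    """ Hypothetical stand-in for the SpiNNaker ProgressBar; only the
        constructor, update() and end() interface is assumed here.
    """

    def __init__(self, total_number_of_things, description):
        self._total = total_number_of_things
        self._done = 0
        sys.stderr.write("{}\n".format(description))

    def update(self, amount_to_add=1):
        # a real implementation would redraw the bar here
        self._done += amount_to_add

    def end(self):
        sys.stderr.write("completed {} of {}\n".format(self._done, self._total))


# the usage pattern shared by every example on this page
items = range(10)
progress_bar = ProgressBar(len(items), "Processing items")
for _ in items:
    progress_bar.update()
progress_bar.end()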
Example #2
    def __call__(self, report_folder, application_graph):
        """

        :param report_folder: the report folder to put figure into
        :param application_graph: the app graph
        :rtype: None
        """
        try:
            import graphviz
        except ImportError:
            raise SpynnakerException(
                "graphviz is required to use this report.  Please install"
                " graphviz if you want to use this report.")

        # create holders for data
        vertex_holders = dict()
        dot_diagram = graphviz.Digraph(
            comment="The graph of the network in graphical form")

        # build progress bar for the vertices, edges, and rendering
        progress_bar = ProgressBar(
            application_graph.n_vertices +
            application_graph.n_outgoing_edge_partitions + 1,
            "generating the graphical representation of the neural network")

        # write vertices into dot diagram
        vertex_counter = 0
        for vertex in application_graph.vertices:
            dot_diagram.node(
                "{}".format(vertex_counter),
                "{} ({} neurons)".format(vertex.label, vertex.n_atoms))
            vertex_holders[vertex] = vertex_counter
            vertex_counter += 1
            progress_bar.update()

        # write edges into dot diagram
        for partition in application_graph.outgoing_edge_partitions:
            for edge in partition.edges:
                source_vertex_id = vertex_holders[edge.pre_vertex]
                dest_vertex_id = vertex_holders[edge.post_vertex]
                if isinstance(edge, ProjectionApplicationEdge):
                    for synapse_info in edge.synapse_information:
                        dot_diagram.edge(
                            "{}".format(source_vertex_id),
                            "{}".format(dest_vertex_id),
                            "{}".format(synapse_info.connector))
                else:
                    dot_diagram.edge(
                        "{}".format(source_vertex_id),
                        "{}".format(dest_vertex_id))
            progress_bar.update()

        # write dot file and generate pdf
        file_to_output = os.path.join(report_folder, "network_graph.gv")
        dot_diagram.render(file_to_output, view=False)
        progress_bar.update()
        progress_bar.end()
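Example #2 relies on the third-party graphviz package (imported inside the method so a missing install can be reported cleanly). Outside the SpiNNaker tool chain, the same Digraph/node/edge/render calls boil down to a few lines; the population names below are made-up placeholders:

import graphviz

# build a tiny two-population digraph with the same calls as Example #2
dot = graphviz.Digraph(comment="The graph of the network in graphical form")
dot.node("0", "pop_a (100 neurons)")
dot.node("1", "pop_b (50 neurons)")
dot.edge("0", "1", "one-to-one connector")

# writes network_graph.gv and renders it (PDF by default); requires the
# Graphviz binaries to be installed on the system
dot.render("network_graph.gv", view=False)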
Example #3
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex,
                   base_key_function):

        results = list()
        missing_str = ""
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))

        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            spike_data = str(raw_spike_data.read_all())
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - base_key_function(subvertex)) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
Example #4
    def update_runtime(self, run_time, infinite_run, core_subsets, n_cores):
        self._progress_bar = ProgressBar(n_cores, "Updating run time")
        for core_subset in core_subsets:
            for processor_id in core_subset.processor_ids:
                self._send_request(
                    SCPUpdateRuntimeRequest(
                        core_subset.x, core_subset.y, processor_id,
                        run_time, infinite_run,
                        constants.SDP_PORTS.RUNNING_COMMAND_SDP_PORT.value),
                    callback=self.receive_response)
        self._finish()
        self._progress_bar.end()
        self.check_for_error()
Example #5
    def load_initial_buffers(self):
        """ Load the initial buffers for the senders using mem writes
        """
        total_data = 0
        for vertex in self._sender_vertices:
            for region in vertex.get_regions():
                total_data += vertex.get_region_buffer_size(region)

        progress_bar = ProgressBar(
            total_data, "Loading buffers ({} bytes)".format(total_data))
        for vertex in self._sender_vertices:
            for region in vertex.get_regions():
                self._send_initial_messages(vertex, region, progress_bar)
        progress_bar.end()
Example #6
    def _handle_external_algorithm(self, algorithm):
        """ Creates the input files for the algorithm

        :param algorithm: the algorithm
        :return: None
        """
        input_params = self._create_input_commands(algorithm)

        inputs = \
            [a.format(**input_params) for a in algorithm.command_line_args]

        # output debug info in case things go wrong
        logger.debug(
            "The inputs to the external mapping function are {}".format(
                inputs))

        # create progress bar for external algorithm
        algorithm_progress_bar = ProgressBar(
            1, "Running external algorithm {}".format(algorithm.algorithm_id))

        timer = None
        if self._do_timing:
            timer = Timer()
            timer.start_timing()

        # execute the external command
        child = subprocess.Popen(inputs,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE)
        child.wait()
        algorithm_progress_bar.end()

        if self._do_timing:
            self._update_timings(timer, algorithm)

        # check the return code for a successful execution
        if child.returncode != 0:
            stdout, stderr = child.communicate()
            raise exceptions.\
                PacmanExternalAlgorithmFailedToCompleteException(
                    "Algorithm {} returned a non-zero error code {}\n"
                    "    Inputs: {}\n"
                    "    Output: {}\n"
                    "    Error: {}\n".format(
                        algorithm.algorithm_id, child.returncode,
                        inputs, stdout, stderr))

        outputs = self._sort_out_external_algorithm_outputs(algorithm)
        self._map_output_parameters(outputs, algorithm)
Example #7
def tag_allocator_report(report_folder, tag_infos):
    """ Reports the tags that are being used by the tool chain for this\
        simulation

    :param report_folder: the folder to which the reports are being written
    :param tag_infos: the tags container generated by the tools.
    :return: None
    """
    progress_bar = ProgressBar(
        len(list(tag_infos.ip_tags)) + len(list(tag_infos.reverse_ip_tags)),
        "Reporting Tags")

    file_name = os.path.join(report_folder, "tags.rpt")
    f_routing = None
    try:
        f_routing = open(file_name, "w")
    except IOError:
        logger.error("Generate_tag_report: Can't open file {} for "
                     "writing.".format(file_name))
    for ip_tag in tag_infos.ip_tags:
        f_routing.write("{}".format(ip_tag))
        progress_bar.update()
    for reverse_ip_tag in tag_infos.reverse_ip_tags:
        f_routing.write("{}".format(reverse_ip_tag))
        progress_bar.update()
    f_routing.flush()
    f_routing.close()
    progress_bar.end()
Example #8
    def __call__(self, partitioned_graph, partitionable_graph=None,
                 graph_mapper=None):

        # Generate an n_keys map for the graph and add constraints
        n_keys_map = DictBasedPartitionedPartitionNKeysMap()

        # generate progress bar
        progress_bar = ProgressBar(
            len(partitioned_graph.subvertices),
            "Deducing edge to number of keys map")

        # a partitionable graph and mapper are available, so use them
        if partitionable_graph is not None and graph_mapper is not None:
            # iterate over each partition in the partitioned graph
            for vertex in partitioned_graph.subvertices:
                partitions = \
                    partitioned_graph.outgoing_edges_partitions_from_vertex(
                        vertex)
                for partition_id in partitions:
                    partition = partitions[partition_id]
                    added_constraints = False
                    constraints = self._process_partitionable_partition(
                        partition, n_keys_map, partition_id, graph_mapper,
                        partitionable_graph)
                    if not added_constraints:
                        partition.add_constraints(constraints)
                    else:
                        self._check_constraints_equal(
                            constraints, partition.constraints)
                progress_bar.update()
            progress_bar.end()
        else:
            for vertex in partitioned_graph.subvertices:
                partitions = \
                    partitioned_graph.outgoing_edges_partitions_from_vertex(
                        vertex)
                for partition_id in partitions:
                    partition = partitions[partition_id]
                    added_constraints = False
                    constraints = self._process_partitioned_partition(
                        partition, n_keys_map, partition_id, partitioned_graph)
                    if not added_constraints:
                        partition.add_constraints(constraints)
                    else:
                        self._check_constraints_equal(
                            constraints, partition.constraints)
                progress_bar.update()
            progress_bar.end()

        return {'n_keys_map': n_keys_map}
Example #9
    def _get_synaptic_data(self, as_list, data_to_get):

        post_vertex = self._projection_edge.post_vertex
        pre_vertex = self._projection_edge.pre_vertex

        # If in virtual board mode, the connection data should be set
        if self._virtual_connection_list is not None:
            post_vertex = self._projection_edge.post_vertex
            pre_vertex = self._projection_edge.pre_vertex
            return ConnectionHolder(
                data_to_get, as_list, pre_vertex.n_atoms, post_vertex.n_atoms,
                self._virtual_connection_list)

        connection_holder = ConnectionHolder(
            data_to_get, as_list, pre_vertex.n_atoms, post_vertex.n_atoms)

        # If we haven't run, add the holder to get connections, and return it
        if not self._spinnaker.has_ran:

            post_vertex.add_pre_run_connection_holder(
                connection_holder, self._projection_edge,
                self._synapse_information)
            return connection_holder

        # Otherwise, get the connections now
        graph_mapper = self._spinnaker.graph_mapper
        placements = self._spinnaker.placements
        transceiver = self._spinnaker.transceiver
        routing_infos = self._spinnaker.routing_infos
        partitioned_graph = self._spinnaker.partitioned_graph
        subedges = graph_mapper.get_partitioned_edges_from_partitionable_edge(
            self._projection_edge)
        progress = ProgressBar(
            len(subedges),
            "Getting {}s for projection between {} and {}".format(
                data_to_get, pre_vertex.label, post_vertex.label))
        for subedge in subedges:
            placement = placements.get_placement_of_subvertex(
                subedge.post_subvertex)
            connections = post_vertex.get_connections_from_machine(
                transceiver, placement, subedge, graph_mapper, routing_infos,
                self._synapse_information, partitioned_graph)
            if connections is not None:
                connection_holder.add_connections(connections)
            progress.update()
        progress.end()
        connection_holder.finish()
        return connection_holder
Example #10
    def __call__(self, subgraph, graph_mapper):
        """
        :param subgraph: the subgraph whose edges are to be filtered
        :param graph_mapper: the graph mapper between partitionable and \
                partitioned graphs.
        :return: a new graph mapper and partitioned graph
        """
        new_sub_graph = PartitionedGraph(label=subgraph.label)
        new_graph_mapper = GraphMapper(graph_mapper.first_graph_label,
                                       subgraph.label)

        # create progress bar
        progress_bar = ProgressBar(
            len(subgraph.subvertices) + len(subgraph.subedges),
            "Filtering edges")

        # add the subverts directly, as they won't be pruned.
        for subvert in subgraph.subvertices:
            new_sub_graph.add_subvertex(subvert)
            associated_vertex = graph_mapper.get_vertex_from_subvertex(subvert)
            vertex_slice = graph_mapper.get_subvertex_slice(subvert)
            new_graph_mapper.add_subvertex(subvertex=subvert,
                                           vertex_slice=vertex_slice,
                                           vertex=associated_vertex)
            progress_bar.update()

        # start checking subedges to decide which ones need pruning....
        for subvert in subgraph.subvertices:
            out_going_partitions = \
                subgraph.outgoing_edges_partitions_from_vertex(subvert)
            for partitioner_identifier in out_going_partitions:
                for subedge in \
                        out_going_partitions[partitioner_identifier].edges:
                    if not self._is_filterable(subedge, graph_mapper):
                        logger.debug(
                            "this subedge was not pruned {}".format(subedge))
                        new_sub_graph.add_subedge(subedge,
                                                  partitioner_identifier)
                        associated_edge = graph_mapper.\
                            get_partitionable_edge_from_partitioned_edge(
                                subedge)
                        new_graph_mapper.add_partitioned_edge(
                            subedge, associated_edge)
                    else:
                        logger.debug(
                            "this subedge was pruned {}".format(subedge))
                    progress_bar.update()
        progress_bar.end()

        # return the pruned partitioned_graph and graph_mapper
        return {
            'new_sub_graph': new_sub_graph,
            'new_graph_mapper': new_graph_mapper
        }
Example #11
    def __call__(self, router_tables):
        tables = MulticastRoutingTables()
        previous_masks = dict()

        progress = ProgressBar(
            len(router_tables.routing_tables) * 2,
            "Compressing Routing Tables")

        # Create all masks without holes
        allowed_masks = [0xFFFFFFFFL - ((2**i) - 1) for i in range(33)]
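The list comprehension above builds the 33 "masks without holes": 32-bit masks whose binary form is a run of ones followed by a run of zeros. (The trailing L makes the literal a Python 2 long; the plain 0xFFFFFFFF form below works in both Python 2 and 3.) A quick demonstration of the values it produces:

# the same construction as above, with the first few values spelled out
allowed_masks = [0xFFFFFFFF - ((2 ** i) - 1) for i in range(33)]
print(hex(allowed_masks[0]))   # 0xffffffff - all 32 bits set
print(hex(allowed_masks[1]))   # 0xfffffffe - lowest bit cleared
print(hex(allowed_masks[4]))   # 0xfffffff0 - lowest four bits cleared
print(hex(allowed_masks[32]))  # 0x0        - no bits set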
Example #12
    def __call__(self, subgraph, graph_mapper):
        """
        :param subgraph: the subgraph whose edges are to be updated
        :param graph_mapper: the graph mapper between partitionable and \
                partitioned graphs.
        """

        # create progress bar
        progress_bar = ProgressBar(
            len(subgraph.subedges), "Updating edge weights")

        # update the weight of each subedge that supports it
        for subedge in subgraph.subedges:
            if isinstance(subedge, AbstractWeightUpdatable):
                subedge.update_weight(graph_mapper)
            progress_bar.update()
        progress_bar.end()

        # return the subgraph in the results dictionary
        return {'subgraph': subgraph}
Example #13
    def __call__(self, router_tables, target_length=None):

        # build storage
        compressed_pacman_router_tables = MulticastRoutingTables()

        # create progress bar
        progress_bar = ProgressBar(
            len(router_tables.routing_tables), "Compressing routing Tables")

        # compress each router table
        for router_table in router_tables.routing_tables:

            # convert to rig format
            entries = self._convert_to_mundy_format(router_table)

            # compress the router entries
            compressed_router_table_entries = \
                rigs_compressor.minimise(entries, target_length)

            # convert back to pacman model
            compressed_pacman_table = self._convert_to_pacman_router_table(
                compressed_router_table_entries, router_table.x,
                router_table.y)

            # add to new compressed routing tables
            compressed_pacman_router_tables.add_routing_table(
                compressed_pacman_table)

            progress_bar.update()
        progress_bar.end()

        # return
        return {'routing_tables': compressed_pacman_router_tables}
Example #14
    def __call__(self, partitioned_graph, machine):
        """

        :param partitioned_graph: The partitioned_graph to measure
        :type partitioned_graph:\
                    :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
        :return: The size of the graph in number of chips
        :rtype: int
        """

        # check that the algorithm can handle the constraints
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=partitioned_graph.subvertices,
            supported_constraints=[PlacerChipAndCoreConstraint],
            abstract_constraint_type=AbstractPlacerConstraint)

        ordered_subverts = utility_calls.sort_objects_by_constraint_authority(
            partitioned_graph.subvertices)

        # Iterate over subvertices and allocate
        progress_bar = ProgressBar(len(ordered_subverts),
                                   "Measuring the partitioned graph")
        resource_tracker = ResourceTracker(machine)
        for subvertex in ordered_subverts:
            resource_tracker.allocate_constrained_resources(
                subvertex.resources_required, subvertex.constraints)
            progress_bar.update()
        progress_bar.end()
        return {'n_chips': len(resource_tracker.keys)}
Example #15
    def __call__(self, routing_infos, routing_table_by_partitions, machine):
        """

        :param routing_infos:
        :param routing_table_by_partitions:
        :param machine:
        :return:
        """
        progress_bar = ProgressBar(len(list(machine.chips)),
                                   "Generating routing tables")
        routing_tables = MulticastRoutingTables()
        for chip in machine.chips:
            partitions_in_table = routing_table_by_partitions.\
                get_entries_for_router(chip.x, chip.y)
            if len(partitions_in_table) != 0:
                routing_table = MulticastRoutingTable(chip.x, chip.y)
                for partition in partitions_in_table:
                    keys_and_masks = routing_infos.\
                        get_keys_and_masks_from_partition(partition)
                    entry = partitions_in_table[partition]
                    for key_and_mask in keys_and_masks:
                        multicast_routing_entry = MulticastRoutingEntry(
                            routing_entry_key=key_and_mask.key_combo,
                            defaultable=entry.defaultable,
                            mask=key_and_mask.mask,
                            link_ids=entry.out_going_links,
                            processor_ids=entry.out_going_processors)
                        routing_table.add_mutlicast_routing_entry(
                            multicast_routing_entry)
                routing_tables.add_routing_table(routing_table)
            progress_bar.update()
        progress_bar.end()

        return {"router_tables": routing_tables}
Example #16
    def __call__(self, partitioned_graph, machine):

        # check that the algorithm can handle the constraints
        utility_calls.check_algorithm_can_support_constraints(
            constrained_vertices=partitioned_graph.subvertices,
            supported_constraints=[
                PlacerRadialPlacementFromChipConstraint,
                TagAllocatorRequireIptagConstraint,
                TagAllocatorRequireReverseIptagConstraint,
                PlacerChipAndCoreConstraint],
            abstract_constraint_type=AbstractPlacerConstraint)

        placements = Placements()
        ordered_subverts = utility_calls.sort_objects_by_constraint_authority(
            partitioned_graph.subvertices)

        # Iterate over subvertices and generate placements
        progress_bar = ProgressBar(len(ordered_subverts),
                                   "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))
        for vertex in ordered_subverts:
            self._place_vertex(vertex, resource_tracker, machine, placements)
            progress_bar.update()
        progress_bar.end()
        return {'placements': placements}
Example #17
def routing_info_report(report_folder, partitioned_graph, routing_infos):
    """ Generates a report which says which keys is being allocated to each\
        subvertex

    :param report_folder: the report folder to store this value
    :param partitioned_graph:
    :param routing_infos:
    """
    file_name = os.path.join(report_folder,
                             "virtual_key_space_information_report.rpt")
    output = None
    try:
        output = open(file_name, "w")
    except IOError:
        logger.error("generate virtual key space information report: "
                     "Can't open file {} for writing.".format(file_name))
    progress_bar = ProgressBar(len(partitioned_graph.subvertices),
                               "Generating Routing info report")
    for subvert in partitioned_graph.subvertices:
        output.write("Subvert: {} \n".format(subvert))
        partitions = \
            partitioned_graph.outgoing_edges_partitions_from_vertex(subvert)
        for partition in partitions.values():
            keys_and_masks = \
                routing_infos.get_keys_and_masks_from_partition(partition)
            for subedge in partition.edges:
                output.write("subedge:{}, keys_and_masks:{} \n".format(
                    subedge, keys_and_masks))
        output.write("\n\n")
        progress_bar.update()
    progress_bar.end()
    output.flush()
    output.close()
Example #18
    def __call__(self, application_graph):
        """
        :param application_graph: app graph
        :return: the set of connection holders for after dsg generation
        """

        progress_bar = ProgressBar(
            application_graph.n_outgoing_edge_partitions,
            "Generating connection holders for reporting connection data.")

        data_holders = dict()
        for partition in application_graph.outgoing_edge_partitions:
            for edge in partition.edges:

                # add pre-run connection holders so that reports can extract
                # the data without going to the machine
                if isinstance(edge, ProjectionApplicationEdge):

                    # build connection holders
                    connection_holder = ConnectionHolder(
                        None, True, edge.pre_vertex.n_atoms,
                        edge.post_vertex.n_atoms)

                    for synapse_information in edge.synapse_information:
                        edge.post_vertex.add_pre_run_connection_holder(
                            connection_holder, edge, synapse_information)

                        # store for the report generations
                        data_holders[(edge, synapse_information)] = \
                            connection_holder
            progress_bar.update()
        progress_bar.end()

        # return the data holders
        return data_holders
Example #19
    def __call__(self, partitioned_graph, placements, buffer_manager,
                 ran_token):

        if not ran_token:
            raise exceptions.ConfigurationException(
                "The ran token has not been set")

        # Count the regions to be read
        n_regions_to_read = 0
        for vertex in partitioned_graph.subvertices:
            if isinstance(vertex, AbstractReceiveBuffersToHost):
                n_regions_to_read += len(vertex.get_buffered_regions())

        progress_bar = ProgressBar(n_regions_to_read,
                                   "Extracting buffers from the last run")

        # Read back the regions
        for vertex in partitioned_graph.subvertices:
            if isinstance(vertex, AbstractReceiveBuffersToHost):
                placement = placements.get_placement_of_subvertex(vertex)
                state_region = vertex.get_buffered_state_region()
                for region in vertex.get_buffered_regions():
                    buffer_manager.get_data_for_vertex(placement, region,
                                                       state_region)
                    progress_bar.update()
        progress_bar.end()
Example #20
    def get_v(self, label, buffer_manager, region, state_region, placements,
              graph_mapper, partitionable_vertex):

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        ms_per_tick = self._machine_time_step / 1000.0

        data = list()
        missing_str = ""

        progress_bar = \
            ProgressBar(len(subvertices),
                        "Getting membrane voltage for {}".format(label))

        for subvertex in subvertices:

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            placement = placements.get_placement_of_subvertex(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # for buffered output, the data is taken from the buffer manager
            neuron_param_region_data_pointer, missing_data =\
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if missing_data:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            record_raw = neuron_param_region_data_pointer.read_all()
            record_length = len(record_raw)
            n_rows = record_length / ((vertex_slice.n_atoms + 1) * 4)
            record = (numpy.asarray(record_raw,
                                    dtype="uint8").view(dtype="<i4")).reshape(
                                        (n_rows, (vertex_slice.n_atoms + 1)))
            split_record = numpy.array_split(record, [1, 1], 1)
            record_time = numpy.repeat(split_record[0] * float(ms_per_tick),
                                       vertex_slice.n_atoms, 1)
            record_ids = numpy.tile(
                numpy.arange(vertex_slice.lo_atom, vertex_slice.hi_atom + 1),
                len(record_time)).reshape((-1, vertex_slice.n_atoms))
            record_membrane_potential = split_record[2] / 32767.0

            part_data = numpy.dstack(
                [record_ids, record_time, record_membrane_potential])
            part_data = numpy.reshape(part_data, [-1, 3])
            data.append(part_data)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing membrane voltage data in region {}"
                " from the following cores: {}".format(label, region,
                                                       missing_str))
        data = numpy.vstack(data)
        order = numpy.lexsort((data[:, 1], data[:, 0]))
        result = data[order]
        return result
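Examples #1 and #20 decode each recorded block by treating every row as one little-endian int32 time-step value followed by one int32 fixed-point voltage per neuron in the slice, divided by 32767.0 to recover the value. Below is a small sketch of that decode step on synthetic data; the layout and scaling are inferred from the code above rather than from any file-format documentation:

import numpy

# synthetic recording: 2 time steps for a slice of 3 neurons, stored as rows
# of [time_tick, v0, v1, v2] in little-endian int32 (the layout assumed above)
n_atoms = 3
raw = numpy.array([[0, 16384, -16384, 32767],
                   [1, 16000, -16000, 0]], dtype="<i4").tobytes()

record = numpy.frombuffer(raw, dtype="<i4").reshape((-1, n_atoms + 1))
times = record[:, 0] * 1.0          # multiplied by ms_per_tick in the real code
voltages = record[:, 1:] / 32767.0  # undo the fixed-point scaling
print(times)      # time of each row, in ticks here
print(voltages)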
Example #21
    def __call__(self, machine_graph, graph_mapper):
        """
        :param machine_graph: the machine_graph whose edges are to be filtered
        :param graph_mapper: the graph mapper between graphs
        :return: a new graph mapper and machine graph
        """
        new_machine_graph = MachineGraph(label=machine_graph.label)
        new_graph_mapper = GraphMapper()

        # create progress bar
        progress_bar = ProgressBar(
            machine_graph.n_vertices +
            machine_graph.n_outgoing_edge_partitions, "Filtering edges")

        # add the vertices directly, as they won't be pruned.
        for vertex in machine_graph.vertices:
            new_machine_graph.add_vertex(vertex)
            associated_vertex = graph_mapper.get_application_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)
            new_graph_mapper.add_vertex_mapping(
                machine_vertex=vertex,
                vertex_slice=vertex_slice,
                application_vertex=associated_vertex)
            progress_bar.update()

        # start checking edges to decide which ones need pruning....
        for partition in machine_graph.outgoing_edge_partitions:
            for edge in partition.edges:
                if not self._is_filterable(edge, graph_mapper):
                    logger.debug("this edge was not pruned {}".format(edge))
                    new_machine_graph.add_edge(edge, partition.identifier)
                    app_edge = graph_mapper.get_application_edge(edge)
                    new_graph_mapper.add_edge_mapping(edge, app_edge)

                    # add partition constraints from the original graph to
                    # the new graph
                    new_machine_graph_partition = new_machine_graph.\
                        get_outgoing_edge_partition_starting_at_vertex(
                            edge.pre_vertex, partition.identifier)
                    new_machine_graph_partition.add_constraints(
                        partition.constraints)
                else:
                    logger.debug("this edge was pruned {}".format(edge))
            progress_bar.update()
        progress_bar.end()

        # return the pruned graph and graph_mapper
        return new_machine_graph, new_graph_mapper
Example #22
    def _write_router_provenance_data(self, router_tables, machine, txrx):
        """ Writes the provenance data of the router diagnostics

        :param router_tables: the routing tables generated by pacman
        :param machine: the SpiNNMachine object
        :param txrx: the transceiver object
        :return: None
        """
        progress = ProgressBar(machine.n_chips, "Getting Router Provenance")

        # acquire diagnostic data
        items = list()
        seen_chips = set()

        for router_table in sorted(
                router_tables.routing_tables,
                key=lambda table: (table.x, table.y)):
            x = router_table.x
            y = router_table.y
            if not machine.get_chip_at(x, y).virtual:
                router_diagnostic = txrx.get_router_diagnostics(x, y)
                seen_chips.add((x, y))
                reinjector_status = txrx.get_reinjection_status(x, y)
                items.extend(self._write_router_diagnostics(
                    x, y, router_diagnostic, reinjector_status, True))
                self._add_totals(router_diagnostic, reinjector_status)
            progress.update()

        for chip in sorted(machine.chips, key=lambda c: (c.x, c.y)):
            if not chip.virtual and (chip.x, chip.y) not in seen_chips:
                try:
                    diagnostic = txrx.get_router_diagnostics(chip.x, chip.y)

                    if (diagnostic.n_dropped_multicast_packets != 0 or
                            diagnostic.n_local_multicast_packets != 0 or
                            diagnostic.n_external_multicast_packets != 0):

                        reinjector_status = txrx.get_reinjection_status(
                            chip.x, chip.y)
                        items.extend(self._write_router_diagnostics(
                            chip.x, chip.y, diagnostic, reinjector_status,
                            False))
                        self._add_totals(diagnostic, reinjector_status)
                        progress.update()
                except Exception:
                    # There could be issues with unused chips - don't worry!
                    pass
        progress.end()
        return items
Example #23
class UpdateRuntimeProcess(AbstractMultiConnectionProcess):

    def __init__(self, connection_selector):
        AbstractMultiConnectionProcess.__init__(self, connection_selector)
        self._progress_bar = None

    def receive_response(self, response):
        if self._progress_bar is not None:
            self._progress_bar.update()

    def update_runtime(self, run_time, infinite_run, core_subsets, n_cores):
        self._progress_bar = ProgressBar(n_cores, "Updating run time")
        for core_subset in core_subsets:
            for processor_id in core_subset.processor_ids:
                self._send_request(
                    SCPUpdateRuntimeRequest(
                        core_subset.x, core_subset.y, processor_id,
                        run_time, infinite_run,
                        constants.SDP_PORTS.RUNNING_COMMAND_SDP_PORT.value),
                    callback=self.receive_response)
        self._finish()
        self._progress_bar.end()
        self.check_for_error()
Example #24
    def __call__(self, report_folder, connection_holder, dsg_targets):
        """ converts synaptic matrix for every application edge.
        """

        # Update the print options to display everything
        import numpy
        print_opts = numpy.get_printoptions()
        numpy.set_printoptions(threshold=numpy.nan)

        if dsg_targets is None:
            raise SynapticConfigurationException(
                "dsg targets should not be none, used as a check for "
                "connection holder data to be generated")

        # generate folder for synaptic reports
        top_level_folder = os.path.join(report_folder,
                                        "synaptic_matrix_reports")
        if not os.path.exists(top_level_folder):
            os.mkdir(top_level_folder)

        # create progress bar
        progress = ProgressBar(len(connection_holder.keys()),
                               "Generating synaptic matrix reports")

        # for each application edge, write matrix in new file
        for application_edge, _ in connection_holder.keys():

            # only write matrices for edges which have them
            if isinstance(application_edge, ProjectionApplicationEdge):

                # figure out the new file name
                file_name = os.path.join(
                    top_level_folder,
                    "synaptic_matrix_for_application_edge_{}".format(
                        application_edge.label))

                # open writer
                output = None
                try:
                    output = open(file_name, "w")
                except IOError:
                    logger.error("Generate_placement_reports: Can't open file"
                                 " {} for writing.".format(file_name))

                # write the data for every synapse_information entry to the same file
                for info in application_edge.synapse_information:
                    this_connection_holder = connection_holder[(
                        application_edge, info)]
                    output.write("{}".format(this_connection_holder))
                output.flush()
                output.close()

            progress.update()
        progress.end()

        # Reset the print options
        numpy.set_printoptions(**print_opts)
Example #25
    def get_spikes(self, label, buffer_manager, region, placements,
                   graph_mapper, application_vertex, base_key_function,
                   machine_time_step):

        results = list()
        missing_str = ""
        ms_per_tick = machine_time_step / 1000.0
        vertices = \
            graph_mapper.get_machine_vertices(application_vertex)
        progress_bar = ProgressBar(len(vertices),
                                   "Getting spikes for {}".format(label))

        for vertex in vertices:

            placement = placements.get_placement_of_vertex(vertex)
            vertex_slice = graph_mapper.get_slice(vertex)

            x = placement.x
            y = placement.y
            p = placement.p

            # Read the spikes
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(placement, region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            spike_data = str(raw_spike_data.read_all())
            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(spike_data,
                                        dtype="<u4",
                                        count=eieio_header.count,
                                        offset=offset)
                neuron_ids = ((keys - base_key_function(vertex)) +
                              vertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(results) != 0:
            result = numpy.vstack(results)
            result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        else:
            result = []
        return result
Example #26
def router_report_from_paths(
        report_folder, routing_tables, routing_infos, hostname,
        partitioned_graph, placements, machine):
    """ Generates a text file of routing paths

    :param routing_tables:
    :param report_folder:
    :param hostname:
    :param routing_infos:
    :param partitioned_graph:
    :param placements:
    :param machine:
    :return:
    """
    file_name = os.path.join(report_folder, "edge_routing_info.rpt")
    f_routing = None
    try:
        f_routing = open(file_name, "w")
    except IOError:
        logger.error("Generate_routing_reports: Can't open file {} for "
                     "writing.".format(file_name))

    f_routing.write("        Edge Routing Report\n")
    f_routing.write("        ===================\n\n")
    time_date_string = time.strftime("%c")
    f_routing.write("Generated: {}".format(time_date_string))
    f_routing.write(" for target machine '{}'".format(hostname))
    f_routing.write("\n\n")

    progress_bar = ProgressBar(len(partitioned_graph.subedges),
                               "Generating Routing path report")
    for edge in partitioned_graph.subedges:
        source_placement = placements.get_placement_of_subvertex(
            edge.pre_subvertex)
        destination_placement = placements.get_placement_of_subvertex(
            edge.post_subvertex)
        partition = partitioned_graph.get_partition_of_subedge(edge)
        key_and_mask = routing_infos.get_keys_and_masks_from_partition(
            partition)[0]
        path, number_of_entries = _search_route(
            source_placement, destination_placement, key_and_mask,
            routing_tables, machine)
        text = "**** SubEdge '{}', from vertex: '{}' to vertex: '{}'".format(
            edge.label, edge.pre_subvertex.label, edge.post_subvertex.label)
        text += " Takes path \n {}".format(path)
        f_routing.write(text)
        f_routing.write("\n")
        text = "Route length: {}\n".format(number_of_entries)
        f_routing.write(text)

        # End one entry:
        f_routing.write("\n")
        progress_bar.update()
    f_routing.flush()
    f_routing.close()
    progress_bar.end()
Example #27
    def __call__(self,
                 placements,
                 graph,
                 hostname,
                 report_default_directory,
                 write_text_specs,
                 app_data_runtime_folder,
                 machine,
                 graph_mapper=None):

        # Keep the results
        dsg_targets = dict()

        # Keep delay extensions until the end
        delay_extension_placements = list()

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(placements.placements)),
                                   "Generating sPyNNaker data specifications")
        for placement in placements.placements:
            associated_vertex = graph_mapper.get_application_vertex(
                placement.vertex)

            if isinstance(associated_vertex, DelayExtensionVertex):
                delay_extension_placements.append(
                    (placement, associated_vertex))
            else:
                self._generate_data_spec_for_vertices(
                    placement, associated_vertex, dsg_targets, hostname,
                    report_default_directory, write_text_specs,
                    app_data_runtime_folder, machine)
                progress_bar.update()

        for placement, associated_vertex in delay_extension_placements:
            self._generate_data_spec_for_vertices(placement, associated_vertex,
                                                  dsg_targets, hostname,
                                                  report_default_directory,
                                                  write_text_specs,
                                                  app_data_runtime_folder,
                                                  machine)
            progress_bar.update()

        # finish the progress bar
        progress_bar.end()

        return dsg_targets
Example #28
    def __call__(
            self, placements, graph_mapper, tags, executable_finder,
            partitioned_graph, partitionable_graph, routing_infos, hostname,
            report_default_directory, write_text_specs,
            app_data_runtime_folder):

        # Keep the results
        executable_targets = ExecutableTargets()
        dsg_targets = dict()

        # Keep delay extensions until the end
        delay_extension_placements = list()

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(placements.placements)),
                                   "Generating sPyNNaker data specifications")
        for placement in placements.placements:
            associated_vertex = graph_mapper.get_vertex_from_subvertex(
                placement.subvertex)

            if isinstance(associated_vertex, DelayExtensionVertex):
                delay_extension_placements.append(
                    (placement, associated_vertex))
            else:
                self._generate_data_spec_for_subvertices(
                    placement, associated_vertex, executable_targets,
                    dsg_targets, graph_mapper, tags, executable_finder,
                    partitioned_graph, partitionable_graph, routing_infos,
                    hostname, report_default_directory, write_text_specs,
                    app_data_runtime_folder)
                progress_bar.update()

        for placement, associated_vertex in delay_extension_placements:
            self._generate_data_spec_for_subvertices(
                placement, associated_vertex, executable_targets,
                dsg_targets, graph_mapper, tags, executable_finder,
                partitioned_graph, partitionable_graph, routing_infos,
                hostname, report_default_directory, write_text_specs,
                app_data_runtime_folder)
            progress_bar.update()

        # finish the progress bar
        progress_bar.end()

        return {'executable_targets': executable_targets,
                'dsg_targets': dsg_targets}
Example #29
    def _run_for_core_subsets(self, core_subsets, transceiver):
        progress_bar = ProgressBar(len(core_subsets), "Extracting IOBUF")
        error_entries = list()
        warn_entries = list()
        io_buffers = list(transceiver.get_iobuf(core_subsets))
        for io_buffer in io_buffers:
            self._check_iobuf_for_error(io_buffer, error_entries, warn_entries)
            progress_bar.update()
        progress_bar.end()
        return io_buffers, error_entries, warn_entries
Example #30
def partitioner_report(report_folder, hostname, graph, graph_mapper):
    """ Generate report on the placement of sub-vertices onto cores.
    """

    # Cycle through all vertices, and for each cycle through its sub-vertices.
    # For each sub-vertex, describe its core mapping.
    file_name = os.path.join(report_folder, "partitioned_by_vertex.rpt")
    f_place_by_vertex = None
    try:
        f_place_by_vertex = open(file_name, "w")
    except IOError:
        logger.error("Generate_placement_reports: Can't open file {} for"
                     " writing.".format(file_name))

    f_place_by_vertex.write(
        "        Placement Information by Vertex\n")
    f_place_by_vertex.write("        ===============================\n\n")
    time_date_string = time.strftime("%c")
    f_place_by_vertex.write("Generated: {}".format(time_date_string))
    f_place_by_vertex.write(" for target machine '{}'".format(hostname))
    f_place_by_vertex.write("\n\n")

    vertices = sorted(graph.vertices, key=lambda x: x.label)
    progress_bar = ProgressBar(len(vertices),
                               "Generating partitioner report")
    for v in vertices:
        vertex_name = v.label
        vertex_model = v.model_name
        num_atoms = v.n_atoms
        f_place_by_vertex.write(
            "**** Vertex: '{}'\n".format(vertex_name))
        f_place_by_vertex.write("Model: {}\n".format(vertex_model))
        f_place_by_vertex.write("Pop size: {}\n".format(num_atoms))
        f_place_by_vertex.write("Sub-vertices: \n")

        partitioned_vertices = \
            sorted(graph_mapper.get_subvertices_from_vertex(v),
                   key=lambda x: x.label)
        partitioned_vertices = \
            sorted(partitioned_vertices,
                   key=lambda x: graph_mapper.get_subvertex_slice(x).lo_atom)
        for sv in partitioned_vertices:
            lo_atom = graph_mapper.get_subvertex_slice(sv).lo_atom
            hi_atom = graph_mapper.get_subvertex_slice(sv).hi_atom
            num_atoms = hi_atom - lo_atom + 1
            my_string = "  Slice {}:{} ({} atoms) \n"\
                        .format(lo_atom, hi_atom, num_atoms)
            f_place_by_vertex.write(my_string)
            f_place_by_vertex.flush()
        f_place_by_vertex.write("\n")
        progress_bar.update()

    # Close file:
    f_place_by_vertex.close()
    progress_bar.end()
Example #31
    def __call__(self, executable_targets, app_id, transceiver,
                 loaded_application_data_token):
        """ Go through the executable targets and load each binary to \
            everywhere and then send a start request to the cores that \
            actually use it
        """

        if not loaded_application_data_token:
            raise exceptions.ConfigurationException(
                "The token for having loaded the application data token is set"
                " to false and therefore I cannot run. Please fix and try "
                "again")

        progress_bar = ProgressBar(executable_targets.total_processors,
                                   "Loading executables onto the machine")
        for executable_target_key in executable_targets.binaries:
            file_reader = FileDataReader(executable_target_key)
            core_subset = executable_targets.get_cores_for_binary(
                executable_target_key)

            statinfo = os.stat(executable_target_key)
            size = statinfo.st_size

            # TODO there is a need to parse the binary and see if its
            # ITCM and DTCM requirements are within acceptable params for
            # operating on SpiNNaker. Currently there are just a few safety
            # checks which may not be accurate enough.
            if size > constants.MAX_SAFE_BINARY_SIZE:
                logger.warn(
                    "The size of {} is large enough that its"
                    " possible that the binary may be larger than what is"
                    " supported by spinnaker currently. Please reduce the"
                    " binary size if it starts to behave strangely, or goes"
                    " into the WDOG state before starting.".format(
                        executable_target_key))
                if size > constants.MAX_POSSIBLE_BINARY_SIZE:
                    raise exceptions.ConfigurationException(
                        "The size of {} is too large and therefore"
                        " will very likely cause a WDOG state. Until a more"
                        " precise measurement of ITCM and DTCM can be produced"
                        " this is deemed as an error state. Please reduce the"
                        " size of your binary or circumvent this error check.".
                        format(executable_target_key))

            transceiver.execute_flood(core_subset, file_reader, app_id, size)

            acutal_cores_loaded = 0
            for chip_based in core_subset.core_subsets:
                for _ in chip_based.processor_ids:
                    acutal_cores_loaded += 1
            progress_bar.update(amount_to_add=acutal_cores_loaded)
        progress_bar.end()

        return {"LoadBinariesToken": True}
Example #32
    def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                       placements, transceiver, routing_infos):
        """ Get synaptic data for all connections in this Projection from the\
            machine.
        """
        if self._stored_synaptic_data_from_machine is None:
            timer = None
            if conf.config.getboolean("Reports", "display_algorithm_timings"):
                timer = Timer()
                timer.start_timing()

            subedges = \
                graph_mapper.get_partitioned_edges_from_partitionable_edge(
                    self)
            if subedges is None:
                subedges = list()

            synaptic_list = copy.copy(self._synapse_list)
            synaptic_list_rows = synaptic_list.get_rows()
            progress_bar = ProgressBar(
                len(subedges),
                "Reading back synaptic matrix for edge between"
                " {} and {}".format(self._pre_vertex.label,
                                    self._post_vertex.label))
            for subedge in subedges:
                n_rows = subedge.get_n_rows(graph_mapper)
                pre_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
                post_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.post_subvertex)

                sub_edge_post_vertex = \
                    graph_mapper.get_vertex_from_subvertex(
                        subedge.post_subvertex)
                rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                    placements, transceiver, subedge.pre_subvertex, n_rows,
                    subedge.post_subvertex,
                    self._synapse_row_io, partitioned_graph,
                    routing_infos, subedge.weight_scales).get_rows()

                for i in range(len(rows)):
                    synaptic_list_rows[
                        i + pre_vertex_slice.lo_atom].set_slice_values(
                            rows[i], vertex_slice=post_vertex_slice)
                progress_bar.update()
            progress_bar.end()
            self._stored_synaptic_data_from_machine = synaptic_list
            if conf.config.getboolean("Reports", "display_algorithm_timings"):
                logger.info("Time to read matrix: {}".format(
                    timer.take_sample()))

        return self._stored_synaptic_data_from_machine
Example #33
    def _do_allocation(self, ordered_subverts, placements, machine):

        # Iterate over subvertices and generate placements
        progress_bar = ProgressBar(len(ordered_subverts),
                                   "Placing graph vertices")
        resource_tracker = ResourceTracker(
            machine, self._generate_radial_chips(machine))

        # iterate over subverts
        for subvertex_list in ordered_subverts:

            # if there are too many one-to-one vertices to fit on a chip,
            # allocate them individually
            if len(subvertex_list) > self.MAX_CORES_PER_CHIP_TO_CONSIDER:
                for subvertex in subvertex_list:
                    self._allocate_individual(subvertex, placements,
                                              progress_bar, resource_tracker)
            else:  # can allocate in one block

                # merge constraints
                placement_constraint, ip_tag_constraints, \
                    reverse_ip_tag_constraints = \
                    self._merge_constraints(subvertex_list)
                # locate most cores on a chip
                max_size_on_a_chip = resource_tracker.\
                    max_available_cores_on_chips_that_satisfy(
                        placement_constraint, ip_tag_constraints,
                        reverse_ip_tag_constraints)

                # if the whole group fits on one chip, block allocate it;
                # otherwise fall back to allocating each subvertex individually
                if max_size_on_a_chip >= len(subvertex_list):

                    # collect resource requirement
                    resources = list()
                    for subvert in subvertex_list:
                        resources.append(subvert.resources_required)

                    # get cores
                    cores = resource_tracker.allocate_group(
                        resources, placement_constraint, ip_tag_constraints,
                        reverse_ip_tag_constraints)

                    # allocate cores to subverts
                    for subvert, (x, y, p, _, _) in zip(subvertex_list, cores):
                        placement = Placement(subvert, x, y, p)
                        placements.add_placement(placement)
                        progress_bar.update()
                else:
                    for subvertex in subvertex_list:
                        self._allocate_individual(subvertex, placements,
                                                  progress_bar,
                                                  resource_tracker)
        progress_bar.end()
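The fit test above reduces to comparing the largest block of free cores on any chip that satisfies the merged constraints against the size of the one-to-one group. A minimal sketch of that decision, with a plain dict of free-core counts standing in for the real ResourceTracker (the dict and function are assumptions for illustration):

# Hypothetical free-core counts per chip, keyed by (x, y).
free_cores = {(0, 0): 10, (0, 1): 14, (1, 0): 6}

def choose_allocation(group_size):
    """Return 'group' if some chip can hold the whole group, else 'individual'."""
    max_size_on_a_chip = max(free_cores.values())
    if max_size_on_a_chip >= group_size:
        return "group"
    return "individual"

print(choose_allocation(12))  # group      (chip (0, 1) has 14 free cores)
print(choose_allocation(16))  # individual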
Example #34
    def _run_for_placements(self, placements, transceiver):
        io_buffers = list()
        error_entries = list()
        warn_entries = list()
        progress_bar = ProgressBar(len(placements), "Extracting IOBUF")
        for placement in placements:
            iobuf = transceiver.get_iobuf_from_core(
                placement.x, placement.y, placement.p)
            io_buffers.append(iobuf)
            self._check_iobuf_for_error(iobuf, error_entries, warn_entries)
            progress_bar.update()
        progress_bar.end()
        return io_buffers, error_entries, warn_entries
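The _check_iobuf_for_error helper used above is not shown in this snippet; one plausible sketch of what it might do (an assumption, not the original implementation) is a simple line scan of the IOBUF text for error and warning markers:

def check_iobuf_for_error(iobuf_text, error_entries, warn_entries):
    # Collect any IOBUF line that reports an error or a warning.
    for line in iobuf_text.splitlines():
        if "[ERROR]" in line:
            error_entries.append(line)
        elif "[WARNING]" in line:
            warn_entries.append(line)

errors, warnings = [], []
check_iobuf_for_error("[INFO] boot ok\n[ERROR] overflow on core 3",
                      errors, warnings)
print(errors)  # ['[ERROR] overflow on core 3']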
Example #35
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        missing_str = ""

        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))
        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p
            lo_atom = subvertex_slice.lo_atom

            # Read the spikes
            n_words = int(math.ceil(subvertex_slice.n_atoms / 32.0))
            n_bytes_per_block = n_words * 4

            # for buffered output, the info is taken from the buffer manager
            neuron_param_region_data_pointer, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            raw_data = neuron_param_region_data_pointer.read_all()
            offset = 0
            while offset < len(raw_data):
                ((time, n_blocks), offset) = (
                    struct.unpack_from("<II", raw_data, offset), offset + 8)
                (spike_data, offset) = (numpy.frombuffer(
                    raw_data, dtype="uint8",
                    count=n_bytes_per_block * n_blocks, offset=offset),
                    offset + (n_bytes_per_block * n_blocks))
                spikes = spike_data.view("<i4").byteswap().view("uint8")
                bits = numpy.fliplr(numpy.unpackbits(spikes).reshape(
                    (-1, 32))).reshape((-1, n_bytes_per_block * 8))
                indices = numpy.nonzero(bits)[1]
                times = numpy.repeat([time * ms_per_tick], len(indices))
                indices = indices + lo_atom
                spike_ids.append(indices)
                spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))

        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
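The bit manipulation inside the while loop converts each 32-bit spike word into the indices of the neurons that fired. A minimal standalone sketch of that unpacking with numpy, on a synthetic word rather than machine output:

import numpy

# One little-endian 32-bit word in which bits 0, 5 and 31 are set,
# i.e. neurons 0, 5 and 31 of this block spiked at the same time step.
word = (1 << 0) | (1 << 5) | (1 << 31)
spike_data = numpy.array([word], dtype="<u4").view("uint8")

# Same steps as above: byteswap so unpackbits sees the high byte first,
# then flip each 32-bit row so that column k corresponds to neuron k.
spikes = spike_data.view("<i4").byteswap().view("uint8")
bits = numpy.fliplr(numpy.unpackbits(spikes).reshape((-1, 32)))
print(numpy.nonzero(bits)[1])  # [ 0  5 31]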
Example #36
    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        missing_str = ""

        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))
        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            x = placement.x
            y = placement.y
            p = placement.p
            lo_atom = subvertex_slice.lo_atom

            # Read the spikes
            n_words = int(math.ceil(subvertex_slice.n_atoms / 32.0))
            n_bytes = n_words * 4
            n_words_with_timestamp = n_words + 1

            # for buffered output, the info is taken from the buffer manager
            neuron_param_region_data_pointer, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(x, y, p)
            record_raw = neuron_param_region_data_pointer.read_all()
            raw_data = (numpy.asarray(record_raw, dtype="uint8").
                        view(dtype="<i4")).reshape(
                [-1, n_words_with_timestamp])
            split_record = numpy.array_split(raw_data, [1, 1], 1)
            record_time = split_record[0] * float(ms_per_tick)
            spikes = split_record[2].byteswap().view("uint8")
            bits = numpy.fliplr(numpy.unpackbits(spikes).reshape(
                (-1, 32))).reshape((-1, n_bytes * 8))
            time_indices, indices = numpy.where(bits == 1)
            times = record_time[time_indices].reshape((-1))
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        if len(missing_str) > 0:
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))

        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
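Compared with Example #35, this variant avoids the per-block while loop and recovers spike times and neuron ids in a single vectorised step. A minimal sketch of that step on synthetic data (the shapes below are assumptions, not machine output):

import numpy

# Two recorded time steps by four neurons: neuron 1 fires at 0.0 ms,
# neurons 0 and 3 fire at 1.0 ms.
bits = numpy.array([[0, 1, 0, 0],
                    [1, 0, 0, 1]])
record_time = numpy.array([[0.0], [1.0]])  # one timestamp (ms) per recorded row

time_indices, indices = numpy.where(bits == 1)
times = record_time[time_indices].reshape((-1))
print(list(zip(indices, times)))  # neuron 1 at 0.0 ms, neurons 0 and 3 at 1.0 ms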