Example #1
    def run(self, subgraph, graph_mapper):
        new_sub_graph = PartitionedGraph(label=subgraph.label)
        new_graph_mapper = GraphMapper(graph_mapper.first_graph_label,
                                       subgraph.label)

        # create progress bar
        progress_bar = ProgressBar(
            len(subgraph.subvertices) + len(subgraph.subedges),
            "Filtering edges")

        # add the subverts directly, as they won't be pruned.
        for subvert in subgraph.subvertices:
            new_sub_graph.add_subvertex(subvert)
            associated_vertex = graph_mapper.get_vertex_from_subvertex(subvert)
            vertex_slice = graph_mapper.get_subvertex_slice(subvert)
            new_graph_mapper.add_subvertex(
                subvertex=subvert, vertex_slice=vertex_slice,
                vertex=associated_vertex)
            progress_bar.update()

        # start checking subedges to decide which ones need pruning....
        for subedge in subgraph.subedges:
            if not self._is_filterable(subedge, graph_mapper):
                logger.debug("this subedge was not pruned {}".format(subedge))
                new_sub_graph.add_subedge(subedge)
                associated_edge = graph_mapper.\
                    get_partitionable_edge_from_partitioned_edge(subedge)
                new_graph_mapper.add_partitioned_edge(subedge, associated_edge)
            else:
                logger.debug("this subedge was pruned {}".format(subedge))
            progress_bar.update()
        progress_bar.end()

        # return the pruned partitioned_graph and graph_mapper
        return new_sub_graph, new_graph_mapper
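The `_is_filterable` helper is not shown in this example. A minimal sketch of what such a predicate could look like, assuming a hypothetical `AbstractFilterableEdge` mix-in with a `filter_sub_edge` hook (both names are illustrative, not confirmed by the code above):

    def _is_filterable(self, subedge, graph_mapper):
        # illustrative sketch: ask the associated partitionable edge
        # whether this subedge carries no connections and can be dropped
        associated_edge = graph_mapper.\
            get_partitionable_edge_from_partitioned_edge(subedge)
        if isinstance(associated_edge, AbstractFilterableEdge):
            return associated_edge.filter_sub_edge(subedge, graph_mapper)
        return False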
Example #2
    def reload_application_data(self, reload_application_data_items,
                                load_data=True):
        """

        :param reload_application_data_items:  the application data for each
        core which needs data to be reloaded to work
        :param load_data: a boolean which will not reload if set to false.
        :return: None
        """

        progress = ProgressBar(len(reload_application_data_items),
                               "Reloading Application Data")
        # FIXME: need to find a way to remove these private accesses (maybe
        # when the DSG is partitioned this will clear up)

        for reload_application_data in reload_application_data_items:
            if load_data:
                data_file = FileDataReader(reload_application_data.data_file)
                self._spinnaker_interface._txrx.write_memory(
                    reload_application_data.chip_x,
                    reload_application_data.chip_y,
                    reload_application_data.base_address, data_file,
                    reload_application_data.data_size)
                data_file.close()
            user_0_register_address = self._spinnaker_interface._txrx.\
                get_user_0_register_address_from_core(
                    reload_application_data.chip_x,
                    reload_application_data.chip_y,
                    reload_application_data.processor_id)
            self._spinnaker_interface._txrx.write_memory(
                reload_application_data.chip_x, reload_application_data.chip_y,
                user_0_register_address, reload_application_data.base_address)
            progress.update()
            self._total_processors += 1
        progress.end()
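The items consumed above only need six attributes. A hypothetical stand-in (the real item class is not shown in this example) makes the expected shape explicit:

from collections import namedtuple

# hypothetical stand-in for the items passed to reload_application_data;
# only these six attributes are accessed above
ReloadApplicationDataItem = namedtuple(
    "ReloadApplicationDataItem",
    ["data_file", "chip_x", "chip_y", "processor_id",
     "base_address", "data_size"])

items = [ReloadApplicationDataItem(
    data_file="app_data_for_0_0_1.dat", chip_x=0, chip_y=0,
    processor_id=1, base_address=0x70000000, data_size=1024)]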
Example #3
    def load_initial_buffers(self):
        """ Load the initial buffers for the senders using mem writes
        """
        progress_bar = ProgressBar(len(self._sender_vertices),
                                   "on loading buffer dependent vertices")
        for vertex in self._sender_vertices:
            for region in vertex.get_regions():
                self._send_initial_messages(vertex, region)
            progress_bar.update()
        progress_bar.end()
Example #4
    def _load_application_data(
        self,
        placements,
        vertex_to_subvertex_mapper,
        processor_to_app_data_base_address,
        hostname,
        app_data_folder,
        verify=False,
    ):

        # go through the placements and see if there's any application data to
        # load
        progress_bar = ProgressBar(len(list(placements.placements)), "Loading application data onto the machine")
        for placement in placements.placements:
            associated_vertex = vertex_to_subvertex_mapper.get_vertex_from_subvertex(placement.subvertex)

            if isinstance(associated_vertex, AbstractDataSpecableVertex):
                logger.debug("loading application data for vertex {}".format(associated_vertex.label))
                key = (placement.x, placement.y, placement.p)
                start_address = processor_to_app_data_base_address[key]["start_address"]
                memory_written = processor_to_app_data_base_address[key]["memory_written"]
                file_path_for_application_data = associated_vertex.get_application_data_file_path(
                    placement.x, placement.y, placement.p, hostname, app_data_folder
                )
                application_data_file_reader = SpinnmanFileDataReader(file_path_for_application_data)
                logger.debug("writing application data for vertex {}".format(associated_vertex.label))
                self._txrx.write_memory(
                    placement.x, placement.y, start_address, application_data_file_reader, memory_written
                )
                application_data_file_reader.close()

                if verify:
                    application_data_file_reader = SpinnmanFileDataReader(file_path_for_application_data)
                    all_data = application_data_file_reader.readall()
                    read_data = self._txrx.read_memory(placement.x, placement.y, start_address, memory_written)
                    if read_data != all_data:
                        raise Exception(
                            "Miswrite of {}, {}, {}, {}".format(placement.x, placement.y, placement.p, start_address)
                        )
                    application_data_file_reader.close()

                # update user 0 so that it points to the start of the
                # application's data region on SDRAM
                logger.debug("writing user 0 address for vertex {}".format(associated_vertex.label))
                user_0_register_address = self._txrx.get_user_0_register_address_from_core(
                    placement.x, placement.y, placement.p
                )
                self._txrx.write_memory(placement.x, placement.y, user_0_register_address, start_address)

                # add lines to rerun_script if requested
                if self._reports_states.transciever_report:
                    self._reload_script.add_application_data(file_path_for_application_data, placement, start_address)
            progress_bar.update()
        progress_bar.end()
Example #5
    def get_gsyn(self, label, n_atoms, transceiver, region,
                 n_machine_time_steps, placements, graph_mapper,
                 partitionable_vertex):

        ms_per_tick = self._machine_time_step / 1000.0

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        tempfilehandle = tempfile.NamedTemporaryFile()
        data = numpy.memmap(
            tempfilehandle.file, shape=(n_machine_time_steps, n_atoms),
            dtype="float64,float64,float64,float64")
        data["f0"] = (numpy.arange(
            n_atoms * n_machine_time_steps) % n_atoms).reshape(
                (n_machine_time_steps, n_atoms))
        data["f1"] = numpy.repeat(numpy.arange(
            0, n_machine_time_steps * ms_per_tick, ms_per_tick),
            n_atoms).reshape((n_machine_time_steps, n_atoms))

        progress_bar = ProgressBar(
            len(subvertices), "Getting conductance for {}".format(label))
        for subvertex in subvertices:

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            placement = placements.get_placement_of_subvertex(subvertex)

            region_size = recording_utils.get_recording_region_size_in_bytes(
                n_machine_time_steps, 8 * vertex_slice.n_atoms)
            neuron_param_region_data = recording_utils.get_data(
                transceiver, placement, region, region_size)

            numpy_data = (numpy.asarray(
                neuron_param_region_data, dtype="uint8").view(dtype="<i4") /
                32767.0).reshape(
                    (n_machine_time_steps, vertex_slice.n_atoms * 2))
            data["f2"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data[:, 0::2]
            data["f3"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data[:, 1::2]
            progress_bar.update()

        progress_bar.end()
        data.shape = n_atoms * n_machine_time_steps

        # Sort the data - apparently, using lexsort is faster, but it might
        # consume more memory, so the option is left open for sort-in-place
        order = numpy.lexsort((data["f1"], data["f0"]))
        # data.sort(order=['f0', 'f1'], axis=0)

        result = data.view(dtype="float64").reshape(
            (n_atoms * n_machine_time_steps, 4))[order]
        return result
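The lexsort above orders rows by neuron id first and time second, because numpy.lexsort treats its last key as the primary sort key. A minimal, self-contained illustration:

import numpy

ids = numpy.array([1.0, 0.0, 1.0, 0.0])
times = numpy.array([0.0, 0.0, 1.0, 1.0])

# lexsort sorts by the LAST key first: order by id, then by time
order = numpy.lexsort((times, ids))
print(order)       # [1 3 0 2]
print(ids[order])  # [0. 0. 1. 1.]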
Example #6
    def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                       placements, transceiver, routing_infos):
        """
        Get synaptic data for all connections in this Projection from the
        machine.
        """
        if self._stored_synaptic_data_from_machine is None:
            timer = None
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                timer = Timer()
                timer.start_timing()

            subedges = \
                graph_mapper.get_partitioned_edges_from_partitionable_edge(
                    self)
            if subedges is None:
                subedges = list()

            synaptic_list = copy.copy(self._synapse_list)
            synaptic_list_rows = synaptic_list.get_rows()
            progress_bar = ProgressBar(
                len(subedges),
                "Reading back synaptic matrix for edge between"
                " {} and {}".format(self._pre_vertex.label,
                                    self._post_vertex.label))
            for subedge in subedges:
                n_rows = subedge.get_n_rows(graph_mapper)
                pre_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
                post_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.post_subvertex)

                sub_edge_post_vertex = \
                    graph_mapper.get_vertex_from_subvertex(
                        subedge.post_subvertex)
                rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                    placements, transceiver, subedge.pre_subvertex, n_rows,
                    subedge.post_subvertex,
                    self._synapse_row_io, partitioned_graph,
                    routing_infos, subedge.weight_scales).get_rows()

                for i in range(len(rows)):
                    synaptic_list_rows[
                        i + pre_vertex_slice.lo_atom].set_slice_values(
                            rows[i], vertex_slice=post_vertex_slice)
                progress_bar.update()
            progress_bar.end()
            self._stored_synaptic_data_from_machine = synaptic_list
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                logger.info("Time to read matrix: {}".format(
                    timer.take_sample()))

        return self._stored_synaptic_data_from_machine
Example #7
    def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                       placements, transceiver, routing_infos):
        """
        Get synaptic data for all connections in this Projection from the
        machine.
        """
        if self._stored_synaptic_data_from_machine is None:
            timer = None
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                timer = Timer()
                timer.start_timing()

            logger.debug(
                "Reading synapse data for edge between {} and {}".format(
                    self._pre_vertex.label, self._post_vertex.label))
            subedges = \
                graph_mapper.get_partitioned_edges_from_partitionable_edge(
                    self)
            if subedges is None:
                subedges = list()

            synaptic_list = copy.copy(self._synapse_list)
            synaptic_list_rows = synaptic_list.get_rows()
            progress_bar = ProgressBar(
                len(subedges), "progress on reading back synaptic matrix")
            for subedge in subedges:
                n_rows = subedge.get_n_rows(graph_mapper)
                pre_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
                post_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.post_subvertex)

                sub_edge_post_vertex = \
                    graph_mapper.get_vertex_from_subvertex(
                        subedge.post_subvertex)
                rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                    placements, transceiver, subedge.pre_subvertex, n_rows,
                    subedge.post_subvertex, self._synapse_row_io,
                    partitioned_graph, routing_infos,
                    subedge.weight_scales).get_rows()

                for i in range(len(rows)):
                    synaptic_list_rows[
                        i + pre_vertex_slice.lo_atom].set_slice_values(
                            rows[i], vertex_slice=post_vertex_slice)
                progress_bar.update()
            progress_bar.end()
            self._stored_synaptic_data_from_machine = synaptic_list
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                timer.take_sample()

        return self._stored_synaptic_data_from_machine
Example #8
    def generate_data_specifications(self):
        """ generates the dsg for the graph.

        :return:
        """

        # iterate through subvertices and call generate_data_spec for each
        # vertex
        executable_targets = ExecutableTargets()

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(self._placements.placements)),
                                   "Generating data specifications")
        for placement in self._placements.placements:
            associated_vertex =\
                self._graph_mapper.get_vertex_from_subvertex(
                    placement.subvertex)

            # if the vertex can generate a DSG, call it
            if isinstance(associated_vertex, AbstractDataSpecableVertex):

                ip_tags = self._tags.get_ip_tags_for_vertex(
                    placement.subvertex)
                reverse_ip_tags = self._tags.get_reverse_ip_tags_for_vertex(
                    placement.subvertex)
                associated_vertex.generate_data_spec(
                    placement.subvertex, placement, self._partitioned_graph,
                    self._partitionable_graph, self._routing_infos,
                    self._hostname, self._graph_mapper,
                    self._report_default_directory, ip_tags, reverse_ip_tags,
                    self._write_text_specs, self._app_data_runtime_folder)

                # Get name of binary from vertex
                binary_name = associated_vertex.get_binary_file_name()

                # Attempt to find this within search paths
                binary_path = executable_finder.get_executable_path(
                    binary_name)
                if binary_path is None:
                    raise exceptions.ExecutableNotFoundException(binary_name)

                if not executable_targets.has_binary(binary_path):
                    executable_targets.add_binary(binary_path)
                executable_targets.add_processor(
                    binary_path, placement.x, placement.y, placement.p)

            progress_bar.update()

        # finish the progress bar
        progress_bar.end()

        return executable_targets
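The ExecutableTargets bookkeeping above deduplicates binaries and records which processors run each one. A dict-based sketch of the same idea (illustrative only, not the real class):

# illustrative stand-in for ExecutableTargets: one entry per binary,
# each holding the cores that will run it
targets = {}

def add_processor(binary_path, x, y, p):
    targets.setdefault(binary_path, []).append((x, y, p))

add_processor("neuron.aplx", 0, 0, 1)
add_processor("neuron.aplx", 0, 0, 2)
print(targets)  # {'neuron.aplx': [(0, 0, 1), (0, 0, 2)]}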
Example #9
    def generate_data_specifications(self):
        """ generates the dsg for the graph.

        :return:
        """

        # iterate through subvertices and call generate_data_spec for each
        # vertex
        executable_targets = ExecutableTargets()

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(self._placements.placements)),
                                   "on generating data specifications")
        for placement in self._placements.placements:
            associated_vertex =\
                self._graph_mapper.get_vertex_from_subvertex(
                    placement.subvertex)

            # if the vertex can generate a DSG, call it
            if isinstance(associated_vertex, AbstractDataSpecableVertex):

                ip_tags = self._tags.get_ip_tags_for_vertex(
                    placement.subvertex)
                reverse_ip_tags = self._tags.get_reverse_ip_tags_for_vertex(
                    placement.subvertex)
                associated_vertex.generate_data_spec(
                    placement.subvertex, placement, self._partitioned_graph,
                    self._partitionable_graph, self._routing_infos,
                    self._hostname, self._graph_mapper,
                    self._report_default_directory, ip_tags, reverse_ip_tags,
                    self._writeTextSpecs, self._app_data_runtime_folder)
                # Get name of binary from vertex
                binary_name = associated_vertex.get_binary_file_name()

                # Attempt to find this within search paths
                binary_path = executable_finder.get_executable_path(
                    binary_name)
                if binary_path is None:
                    raise exceptions.ExecutableNotFoundException(binary_name)

                if not executable_targets.has_binary(binary_path):
                    executable_targets.add_binary(binary_path)
                executable_targets.add_processor(binary_path, placement.x,
                                                 placement.y, placement.p)

            # update for every placement, not just the data-specable ones,
            # so that the progress bar always completes
            progress_bar.update()

        # finish the progress bar
        progress_bar.end()

        return executable_targets
Example #10
    def load_executable_images(self, executable_targets, app_id):
        """ Go through the executable targets and load each binary to \
            everywhere and then send a start request to the cores that \
            actually use it
        """

        progress_bar = ProgressBar(executable_targets.total_processors, "Loading executables onto the machine")
        for executable_target_key in executable_targets.binary_paths():
            file_reader = SpinnmanFileDataReader(executable_target_key)
            core_subset = executable_targets.retrieve_cores_for_a_executable_target(executable_target_key)

            statinfo = os.stat(executable_target_key)
            size = statinfo.st_size

            # TODO there is a need to parse the binary and see if its
            # ITCM and DTCM requirements are within acceptable params for
            # operating on spinnaker. Currently there are just a few safety
            # checks, which may not be accurate enough.
            if size > constants.MAX_SAFE_BINARY_SIZE:
                logger.warn(
                    "The size of this binary is large enough that its"
                    " possible that the binary may be larger than what is"
                    " supported by spinnaker currently. Please reduce the"
                    " binary size if it starts to behave strangely, or goes"
                    " into the wdog state before starting."
                )
                if size > constants.MAX_POSSIBLE_BINARY_SIZE:
                    raise exceptions.ConfigurationException(
                        "The size of the binary is too large and therefore"
                        " will very likely cause a WDOG state. Until a more"
                        " precise measurement of ITCM and DTCM can be produced"
                        " this is deemed as an error state. Please reduce the"
                        " size of your binary or circumvent this error check."
                    )

            self._txrx.execute_flood(core_subset, file_reader, app_id, size)

            if self._reports_states.transciever_report:
                self._reload_script.add_binary(executable_target_key, core_subset)
            actual_cores_loaded = 0
            for chip_based in core_subset.core_subsets:
                for _ in chip_based.processor_ids:
                    actual_cores_loaded += 1
            progress_bar.update(amount_to_add=actual_cores_loaded)
        progress_bar.end()
Example #11
    def load_routing_tables(self, router_tables, app_id):
        progress_bar = ProgressBar(len(list(router_tables.routing_tables)), "Loading routing data onto the machine")

        # load each router table that is needed for the application to run
        # into the chip's SDRAM
        for router_table in router_tables.routing_tables:
            if not self._machine.get_chip_at(router_table.x, router_table.y).virtual:
                self._txrx.clear_multicast_routes(router_table.x, router_table.y)
                self._txrx.clear_router_diagnostic_counters(router_table.x, router_table.y)

                if len(router_table.multicast_routing_entries) > 0:
                    self._txrx.load_multicast_routes(
                        router_table.x, router_table.y, router_table.multicast_routing_entries, app_id=app_id
                    )
                    if self._reports_states.transciever_report:
                        self._reload_script.add_routing_table(router_table)
            progress_bar.update()
        progress_bar.end()
Example #12
    def _set_up_send_buffering(self):
        progress_bar = ProgressBar(
            len(self.partitioned_graph.subvertices),
            "on initialising the buffer managers for vertices which require"
            " buffering")

        # Create the buffer manager
        self._send_buffer_manager = BufferManager(
            self._placements, self._routing_infos, self._tags, self._txrx)

        for partitioned_vertex in self.partitioned_graph.subvertices:
            if isinstance(partitioned_vertex,
                          AbstractSendsBuffersFromHostPartitionedVertex):

                # Add the vertex to the managed vertices
                self._send_buffer_manager.add_sender_vertex(
                    partitioned_vertex)
            progress_bar.update()
        progress_bar.end()
Example #13
    def get_spikes(self, label, transceiver, region, placements, graph_mapper,
                   partitionable_vertex):

        results = list()
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))

        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            # Read the spikes
            spike_data = recording_utils.get_data(
                transceiver, placement, region, subvertex.region_size)

            number_of_bytes_written = len(spike_data)

            offset = 0
            while offset < number_of_bytes_written:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = \
                    (keys - subvertex.base_key) + subvertex_slice.lo_atom
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()

        progress_bar.end()
        result = numpy.vstack(results)
        result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        return result
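The loop above walks the recorded block header-by-header, then pulls each header's keys out with numpy.frombuffer using count and offset. The frombuffer step in isolation, with a made-up 2-byte header stand-in:

import numpy

# three little-endian 32-bit keys after a made-up 2-byte header
block = b"\x00\x00" + numpy.array([10, 11, 12], dtype="<u4").tobytes()
keys = numpy.frombuffer(block, dtype="<u4", count=3, offset=2)
print(keys)  # [10 11 12]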
Example #14
    def get_spikes(self, label, transceiver, region, n_machine_time_steps,
                   placements, graph_mapper, partitionable_vertex):

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)

        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))
        for subvertex in subvertices:

            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            lo_atom = subvertex_slice.lo_atom

            # Read the spikes
            n_bytes = int(math.ceil(subvertex_slice.n_atoms / 32.0)) * 4
            region_size = recording_utils.get_recording_region_size_in_bytes(
                n_machine_time_steps, n_bytes)
            spike_data = recording_utils.get_data(
                transceiver, placement, region, region_size)
            numpy_data = numpy.asarray(spike_data, dtype="uint8").view(
                dtype="uint32").byteswap().view("uint8")
            bits = numpy.fliplr(numpy.unpackbits(numpy_data).reshape(
                (-1, 32))).reshape((-1, n_bytes * 8))
            times, indices = numpy.where(bits == 1)
            times = times * ms_per_tick
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
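The decode above packs one spike bit per neuron per time step into little-endian 32-bit words, which are byteswapped and then unpacked MSB-first so that fliplr restores bit 0 to column 0. A minimal sketch of the same trick on synthetic data (assuming a little-endian host, as the code above also does):

import numpy

# two 32-neuron time steps: neuron 0 spikes at t=0, neuron 5 at t=1
raw = numpy.array([1, 1 << 5], dtype="<u4").tobytes()

n_bytes = 4  # bytes per time step for a 32-neuron slice
numpy_data = numpy.frombuffer(raw, dtype="uint8").view(
    dtype="uint32").byteswap().view("uint8")
bits = numpy.fliplr(numpy.unpackbits(numpy_data).reshape(
    (-1, 32))).reshape((-1, n_bytes * 8))
times, indices = numpy.where(bits == 1)
print(times)    # [0 1]
print(indices)  # [0 5]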
Example #15
    def set_up_send_buffering(self, partitioned_graph, placements, tags):
        """
        Interface for buffered vertices.

        :param partitioned_graph: the partitioned graph object
        :param placements: the placements object
        :param tags: the tags object
        :return: None
        """
        progress_bar = ProgressBar(len(partitioned_graph.subvertices), "Initialising buffers")

        # Create the buffer manager
        self._send_buffer_manager = BufferManager(
            placements, tags, self._txrx, self._reports_states, self._app_data_folder, self._reload_script
        )

        for partitioned_vertex in partitioned_graph.subvertices:
            if isinstance(partitioned_vertex, AbstractSendsBuffersFromHostPartitionedVertex):

                # Add the vertex to the managed vertices
                self._send_buffer_manager.add_sender_vertex(partitioned_vertex)
            progress_bar.update()
        progress_bar.end()
Example #16
    def run(self, subgraph, graph_mapper):
        new_sub_graph = PartitionedGraph(label=subgraph.label)
        new_graph_mapper = GraphMapper(graph_mapper.first_graph_label,
                                       subgraph.label)

        # create progress bar
        progress_bar = \
            ProgressBar(len(subgraph.subvertices) + len(subgraph.subedges),
                        "on checking which subedges are filterable given "
                        "heuristics")

        # add the subverts directly, as they won't be pruned.
        for subvert in subgraph.subvertices:
            new_sub_graph.add_subvertex(subvert)
            associated_vertex = graph_mapper.get_vertex_from_subvertex(subvert)
            vertex_slice = graph_mapper.get_subvertex_slice(subvert)
            new_graph_mapper.add_subvertex(subvertex=subvert,
                                           vertex_slice=vertex_slice,
                                           vertex=associated_vertex)
            progress_bar.update()

        # start checking subedges to decide which ones need pruning....
        for subedge in subgraph.subedges:
            if not self._is_filterable(subedge, graph_mapper):
                logger.debug("this subedge was not pruned {}".format(subedge))
                new_sub_graph.add_subedge(subedge)
                associated_edge = graph_mapper.\
                    get_partitionable_edge_from_partitioned_edge(subedge)
                new_graph_mapper.add_partitioned_edge(subedge, associated_edge)
            else:
                logger.debug("this subedge was pruned {}".format(subedge))
            progress_bar.update()
        progress_bar.end()

        # return the pruned partitioned_graph and graph_mapper
        return new_sub_graph, new_graph_mapper
Example #17
    def get_spikes(
            self, txrx, placements, graph_mapper, compatible_output=False):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells. This is read directly from the memory of the board.

        :param txrx:
        :param placements:
        :param graph_mapper:
        :param compatible_output:
        """

        logger.info("Getting spikes for {}".format(self._label))

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting spikes")
        results = list()
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the App Data for the core
            app_data_base_address = \
                txrx.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address,
                    self._SPIKE_SOURCE_REGIONS
                    .SPIKE_DATA_RECORDED_REGION.value)
            spike_region_base_address_buf = buffer(txrx.read_memory(
                x, y, spike_region_base_address_offset, 4))
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = buffer(txrx.read_memory(
                x, y, spike_region_base_address, 4))
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region we allocated for spikes
            send_buffer = self._get_spike_send_buffer(subvertex_slice)
            if number_of_bytes_written > send_buffer.total_region_size:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})"
                    .format(number_of_bytes_written,
                            send_buffer.total_region_size))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4"
                         .format(number_of_bytes_written,
                                 hex(number_of_bytes_written),
                                 hex(spike_region_base_address)))
            spike_data_block = txrx.read_memory(
                x, y, spike_region_base_address + 4, number_of_bytes_written)

            # translate block of spikes into EIEIO messages
            offset = 0
            while offset <= number_of_bytes_written - 4:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data_block, offset)
                offset += eieio_header.size
                timestamps = numpy.repeat([eieio_header.payload_base],
                                          eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data_block, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - subvertex.base_key) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                results.append(numpy.dstack((neuron_ids, timestamps))[0])

            # complete the buffer
            progress_bar.update()
        progress_bar.end()

        result = numpy.vstack(results)
        result = result[numpy.lexsort((result[:, 1], result[:, 0]))]
        return result
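Both 4-byte reads above follow the same pattern: read one little-endian word and unpack it with struct; the first word is an offset that is added to the application data base address. A toy illustration of that pointer-table step, with made-up addresses:

import struct

app_data_base_address = 0x70000000                # made up
table_entry = struct.pack("<I", 0x1000)           # stands in for read_memory
region_offset = struct.unpack_from("<I", table_entry)[0]
spike_region_base_address = app_data_base_address + region_offset
print(hex(spike_region_base_address))             # 0x70001000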
Example #18
    def run(self, run_time):
        """

        :param run_time:
        :return:
        """
        # sort out config param to be valid types
        width = config.get("Machine", "width")
        height = config.get("Machine", "height")
        if width == "None":
            width = None
        else:
            width = int(width)
        if height == "None":
            height = None
        else:
            height = int(height)

        number_of_boards = config.get("Machine", "number_of_boards")
        if number_of_boards == "None":
            number_of_boards = None

        self.setup_interfaces(
            hostname=self._hostname,
            bmp_details=config.get("Machine", "bmp_names"),
            downed_chips=config.get("Machine", "down_chips"),
            downed_cores=config.get("Machine", "down_cores"),
            board_version=config.getint("Machine", "version"),
            number_of_boards=number_of_boards, width=width, height=height,
            is_virtual=config.getboolean("Machine", "virtual_board"),
            virtual_has_wrap_arounds=config.getboolean(
                "Machine", "requires_wrap_arounds"),
            auto_detect_bmp=config.getboolean("Machine", "auto_detect_bmp"))

        # adds extra stuff needed by the reload script which cannot be given
        # directly.
        if self._reports_states.transciever_report:
            self._reload_script.runtime = run_time
            self._reload_script.time_scale_factor = self._time_scale_factor

        # create network report if needed
        if self._reports_states is not None:
            reports.network_specification_partitionable_report(
                self._report_default_directory, self._partitionable_graph,
                self._hostname)

        # calculate number of machine time steps
        if run_time is not None:
            self._no_machine_time_steps =\
                int((run_time * 1000.0) / self._machine_time_step)
            ceiled_machine_time_steps = \
                math.ceil((run_time * 1000.0) / self._machine_time_step)
            if self._no_machine_time_steps != ceiled_machine_time_steps:
                raise common_exceptions.ConfigurationException(
                    "The runtime and machine time step combination result in "
                    "a factional number of machine runable time steps and "
                    "therefore spinnaker cannot determine how many to run for")
            for vertex in self._partitionable_graph.vertices:
                if isinstance(vertex, AbstractDataSpecableVertex):
                    vertex.set_no_machine_time_steps(
                        self._no_machine_time_steps)
        else:
            self._no_machine_time_steps = None
            logger.warn("You have set a runtime that will never end, this may"
                        "cause the neural models to fail to partition "
                        "correctly")
            for vertex in self._partitionable_graph.vertices:
                if (isinstance(vertex, AbstractPopulationRecordableVertex) and
                        vertex.record):
                    raise common_exceptions.ConfigurationException(
                        "recording a population when set to infinite runtime "
                        "is not currently supportable in this tool chain."
                        "watch this space")

        do_timing = config.getboolean("Reports", "outputTimesForSections")
        if do_timing:
            timer = Timer()
        else:
            timer = None

        self.set_runtime(run_time)
        logger.info("*** Running Mapper *** ")
        if do_timing:
            timer.start_timing()
        self.map_model()
        if do_timing:
            timer.take_sample()

        # add database generation if requested
        needs_database = self._auto_detect_database(self._partitioned_graph)
        user_create_database = config.get("Database", "create_database")
        if ((user_create_database == "None" and needs_database) or
                user_create_database == "True"):

            wait_on_confirmation = config.getboolean(
                "Database", "wait_on_confirmation")
            self._database_interface = SpynnakerDataBaseInterface(
                self._app_data_runtime_folder, wait_on_confirmation,
                self._database_socket_addresses)

            self._database_interface.add_system_params(
                self._time_scale_factor, self._machine_time_step,
                self._runtime)
            self._database_interface.add_machine_objects(self._machine)
            self._database_interface.add_partitionable_vertices(
                self._partitionable_graph)
            self._database_interface.add_partitioned_vertices(
                self._partitioned_graph, self._graph_mapper,
                self._partitionable_graph)
            self._database_interface.add_placements(self._placements,
                                                    self._partitioned_graph)
            self._database_interface.add_routing_infos(
                self._routing_infos, self._partitioned_graph)
            self._database_interface.add_routing_tables(self._router_tables)
            self._database_interface.add_tags(self._partitioned_graph,
                                              self._tags)
            execute_mapping = config.getboolean(
                "Database", "create_routing_info_to_neuron_id_mapping")
            if execute_mapping:
                self._database_interface.create_neuron_to_key_mapping(
                    graph_mapper=self._graph_mapper,
                    partitionable_graph=self._partitionable_graph,
                    partitioned_graph=self._partitioned_graph,
                    routing_infos=self._routing_infos)
            # if using a reload script, record whether it needs to wait for
            # confirmation
            if self._reports_states.transciever_report:
                self._reload_script.wait_on_confirmation = wait_on_confirmation
                for socket_address in self._database_socket_addresses:
                    self._reload_script.add_socket_address(socket_address)
            self._database_interface.send_read_notification()

        # execute data spec generation
        if do_timing:
            timer.start_timing()
        logger.info("*** Generating Output *** ")
        logger.debug("")
        executable_targets = self.generate_data_specifications()
        if do_timing:
            timer.take_sample()

        # execute data spec execution
        if do_timing:
            timer.start_timing()
        processor_to_app_data_base_address = \
            self.execute_data_specification_execution(
                config.getboolean("SpecExecution", "specExecOnHost"),
                self._hostname, self._placements, self._graph_mapper,
                write_text_specs=config.getboolean(
                    "Reports", "writeTextSpecs"),
                runtime_application_data_folder=self._app_data_runtime_folder,
                machine=self._machine)

        if self._reports_states is not None:
            reports.write_memory_map_report(self._report_default_directory,
                                            processor_to_app_data_base_address)

        if do_timing:
            timer.take_sample()

        if (not isinstance(self._machine, VirtualMachine) and
                config.getboolean("Execute", "run_simulation")):
            if do_timing:
                timer.start_timing()

            logger.info("*** Loading tags ***")
            self.load_tags(self._tags)

            if self._do_load is True:
                logger.info("*** Loading data ***")
                self._load_application_data(
                    self._placements, self._graph_mapper,
                    processor_to_app_data_base_address, self._hostname,
                    app_data_folder=self._app_data_runtime_folder,
                    verify=config.getboolean("Mode", "verify_writes"))
                self.load_routing_tables(self._router_tables, self._app_id)
                logger.info("*** Loading executables ***")
                self.load_executable_images(executable_targets, self._app_id)
                logger.info("*** Loading buffers ***")
                self.set_up_send_buffering(self._partitioned_graph,
                                           self._placements, self._tags)

            # end of entire loading setup
            if do_timing:
                timer.take_sample()

            if self._do_run is True:
                logger.info("*** Running simulation... *** ")
                if do_timing:
                    timer.start_timing()
                # everything is in sync0; load the initial buffers
                self._send_buffer_manager.load_initial_buffers()
                if do_timing:
                    timer.take_sample()

                wait_on_confirmation = config.getboolean(
                    "Database", "wait_on_confirmation")
                send_start_notification = config.getboolean(
                    "Database", "send_start_notification")

                self.wait_for_cores_to_be_ready(executable_targets,
                                                self._app_id)

                # wait till external app is ready for us to start if required
                if (self._database_interface is not None and
                        wait_on_confirmation):
                    self._database_interface.wait_for_confirmation()

                self.start_all_cores(executable_targets, self._app_id)

                if (self._database_interface is not None and
                        send_start_notification):
                    self._database_interface.send_start_notification()

                if self._runtime is None:
                    logger.info("Application is set to run forever - exiting")
                else:
                    self.wait_for_execution_to_complete(
                        executable_targets, self._app_id, self._runtime,
                        self._time_scale_factor)
                self._has_ran = True
                if self._retrieve_provance_data:

                    progress = ProgressBar(self._placements.n_placements + 1,
                                           "getting provenance data")

                    # retrieve provenance data from the central machinery
                    file_path = os.path.join(self._report_default_directory,
                                             "provenance_data")

                    # create the directory if it doesn't already exist
                    if not os.path.exists(file_path):
                        os.mkdir(file_path)

                    # write provenance data
                    self.write_provenance_data_in_xml(file_path, self._txrx)
                    progress.update()

                    # retrieve provenance data from any cores that provide data
                    for placement in self._placements.placements:
                        if isinstance(placement.subvertex,
                                      AbstractProvidesProvenanceData):
                            core_file_path = os.path.join(
                                file_path,
                                "Provanence_data_for_{}_{}_{}_{}.xml".format(
                                    placement.subvertex.label,
                                    placement.x, placement.y, placement.p))
                            placement.subvertex.write_provenance_data_in_xml(
                                core_file_path, self.transceiver, placement)
                        progress.update()
                    progress.end()

        elif isinstance(self._machine, VirtualMachine):
            logger.info(
                "*** Using a Virtual Machine so no simulation will occur")
        else:
            logger.info("*** No simulation requested: Stopping. ***")
Example #19
    def _get_spikes(
            self, graph_mapper, placements, transceiver, compatible_output,
            spike_recording_region, sub_vertex_out_spike_bytes_function):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells. This is read directly from the memory of the board.
        """

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(
            len(subvertices), "Getting spikes for {}".format(self._label))
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the App Data for the core
            app_data_base_address = \
                transceiver.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, spike_recording_region)
            spike_region_base_address_buf = buffer(transceiver.read_memory(
                x, y, spike_region_base_address_offset, 4))
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = buffer(transceiver.read_memory(
                x, y, spike_region_base_address, 4))
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region we allocated for spikes
            out_spike_bytes = sub_vertex_out_spike_bytes_function(
                subvertex, subvertex_slice)
            size_of_region = self.get_recording_region_size(out_spike_bytes)

            if number_of_bytes_written > size_of_region:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})"
                    .format(number_of_bytes_written, size_of_region))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4"
                         .format(number_of_bytes_written,
                                 hex(number_of_bytes_written),
                                 hex(spike_region_base_address)))
            spike_data = transceiver.read_memory(
                x, y, spike_region_base_address + 4, number_of_bytes_written)
            numpy_data = numpy.asarray(spike_data, dtype="uint8").view(
                dtype="uint32").byteswap().view("uint8")
            bits = numpy.fliplr(numpy.unpackbits(numpy_data).reshape(
                (-1, 32))).reshape((-1, out_spike_bytes * 8))
            times, indices = numpy.where(bits == 1)
            times = times * ms_per_tick
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
Example #20
    def get_neuron_parameter(
            self, region, compatible_output, has_ran, graph_mapper, placements,
            txrx, machine_time_step):
        if not has_ran:
            raise exceptions.SpynnakerException(
                "The simulation has not yet ran, therefore neuron param "
                "cannot be retrieved")

        times = numpy.zeros(0)
        ids = numpy.zeros(0)
        values = numpy.zeros(0)
        ms_per_tick = self._machine_time_step / 1000.0

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting recorded data")
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p

            # Get the App Data for the core
            app_data_base_address = txrx.\
                get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the value buffer
            neuron_param_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, region)
            neuron_param_region_base_address_buf = str(list(txrx.read_memory(
                x, y, neuron_param_region_base_address_offset, 4))[0])
            neuron_param_region_base_address = \
                struct.unpack("<I", neuron_param_region_base_address_buf)[0]
            neuron_param_region_base_address += app_data_base_address

            # Read the size
            number_of_bytes_written_buf = \
                str(list(txrx.read_memory(
                    x, y, neuron_param_region_base_address, 4))[0])

            number_of_bytes_written = \
                struct.unpack_from("<I", number_of_bytes_written_buf)[0]

            # Read the values
            logger.debug("Reading {} ({}) bytes starting at {}".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(neuron_param_region_base_address + 4)))

            neuron_param_region_data = txrx.read_memory(
                x, y, neuron_param_region_base_address + 4,
                number_of_bytes_written)

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            n_atoms = (vertex_slice.hi_atom - vertex_slice.lo_atom) + 1

            bytes_per_time_step = n_atoms * 4

            number_of_time_steps_written = \
                number_of_bytes_written / bytes_per_time_step

            logger.debug("Processing {} timesteps"
                         .format(number_of_time_steps_written))

            data_list = bytearray()
            for data in neuron_param_region_data:
                data_list.extend(data)

            numpy_data = numpy.asarray(data_list, dtype="uint8").view(
                dtype="<i4") / 32767.0
            values = numpy.append(values, numpy_data)
            times = numpy.append(
                times, numpy.repeat(range(numpy_data.size / n_atoms),
                                    n_atoms) * ms_per_tick)
            ids = numpy.append(ids, numpy.add(
                numpy.arange(numpy_data.size) % n_atoms, vertex_slice.lo_atom))
            progress_bar.update()

        progress_bar.end()
        result = numpy.dstack((ids, times, values))[0]
        result = result[numpy.lexsort((times, ids))]
        return result
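The division by 32767.0 above converts the fixed-point values recorded on the machine back to floats. A toy round trip, with made-up voltages and the scale factor taken from the code above:

import numpy

volts = numpy.array([-65.0, -50.25])           # made-up values
fixed = (volts * 32767.0).astype("<i4")        # as recorded on the machine
recovered = fixed.astype("float64") / 32767.0  # as decoded above
print(recovered)                               # approx. [-65.  -50.25]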
Example #21
    def _get_gsyn(
            self, region, compatible_output, has_ran, graph_mapper, placements,
            txrx, machine_time_step, runtime):
        if not has_ran:
            raise exceptions.SpynnakerException(
                "The simulation has not yet ran, therefore neuron param "
                "cannot be retrieved")

        ms_per_tick = self._machine_time_step / 1000.0
        n_timesteps = runtime / ms_per_tick

        tempfilehandle = tempfile.NamedTemporaryFile()
        data = numpy.memmap(
            tempfilehandle.file, shape=(n_timesteps, self._n_atoms),
            dtype="float64,float64,float64,float64")
        data["f0"] = (numpy.arange(self._n_atoms * n_timesteps) %
                      self._n_atoms).reshape((n_timesteps, self._n_atoms))
        data["f1"] = numpy.repeat(numpy.arange(0, n_timesteps * ms_per_tick,
                                  ms_per_tick), self._n_atoms).reshape(
                                      (n_timesteps, self._n_atoms))

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(
            len(subvertices), "Getting recorded gsyn for {}".format(
                self._label))
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p

            # Get the App Data for the core
            app_data_base_address = txrx.\
                get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the value buffer
            neuron_param_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, region)
            neuron_param_region_base_address_buf = buffer(txrx.read_memory(
                x, y, neuron_param_region_base_address_offset, 4))
            neuron_param_region_base_address = struct.unpack_from(
                "<I", neuron_param_region_base_address_buf)[0]
            neuron_param_region_base_address += app_data_base_address

            # Read the size
            number_of_bytes_written_buf = buffer(txrx.read_memory(
                x, y, neuron_param_region_base_address, 4))

            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # Read the values
            logger.debug("Reading {} ({}) bytes starting at {}".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(neuron_param_region_base_address + 4)))

            neuron_param_region_data = txrx.read_memory(
                x, y, neuron_param_region_base_address + 4,
                number_of_bytes_written)

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            bytes_per_time_step = vertex_slice.n_atoms * 4

            number_of_time_steps_written = \
                number_of_bytes_written / bytes_per_time_step

            logger.debug("Processing {} timesteps"
                         .format(number_of_time_steps_written))

            numpy_data = (numpy.asarray(
                neuron_param_region_data, dtype="uint8").view(dtype="<i4") /
                32767.0).reshape((n_timesteps, vertex_slice.n_atoms * 2))
            data["f2"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data[:, 0::2]
            data["f3"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data[:, 1::2]
            progress_bar.update()

        progress_bar.end()
        data.shape = self._n_atoms * n_timesteps

        # Sort the data - apparently, using lexsort is faster, but it might
        # consume more memory, so the option is left open for sort-in-place
        order = numpy.lexsort((data["f1"], data["f0"]))
        # data.sort(order=['f0', 'f1'], axis=0)

        result = data.view(dtype="float64").reshape(
            (self._n_atoms * n_timesteps, 4))[order]
        return result
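
The record-array pattern in the method above (one float64 field per
output column, a memory-mapped temporary file, then a lexsort over
neuron id and time) can be exercised on its own. A minimal sketch with
toy dimensions and a zeroed value column rather than real recorded data:

import tempfile

import numpy

n_atoms, n_timesteps, ms_per_tick = 3, 4, 1.0

handle = tempfile.NamedTemporaryFile()
data = numpy.memmap(handle.file, shape=(n_timesteps, n_atoms),
                    dtype="float64,float64,float64")

# f0 = neuron id, f1 = time in ms, f2 = the recorded value
data["f0"] = (numpy.arange(n_atoms * n_timesteps) %
              n_atoms).reshape((n_timesteps, n_atoms))
data["f1"] = numpy.repeat(
    numpy.arange(0, n_timesteps * ms_per_tick, ms_per_tick),
    n_atoms).reshape((n_timesteps, n_atoms))
data["f2"] = 0.0

# flatten, then sort by neuron id first and time second (the last
# lexsort key is the primary one)
data.shape = n_atoms * n_timesteps
order = numpy.lexsort((data["f1"], data["f0"]))
result = data.view(dtype="float64").reshape(
    (n_atoms * n_timesteps, 3))[order]
print(result[:n_timesteps])  # all rows for neuron 0, in time order
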
    def host_based_data_specification_execution(
            self, hostname, placements, graph_mapper, write_text_specs,
            application_data_runtime_folder, machine):
        """ Execute the data specifications on the host and write the
        application data to files.

        :param hostname: the hostname of the SpiNNaker machine
        :param placements: the placements of the partitioned vertices
        :param graph_mapper: the mapping between the partitionable and
        partitioned graphs
        :param write_text_specs: True if textual DSE reports are to be written
        :param application_data_runtime_folder: the folder in which the
        application data files are stored
        :param machine: the SpiNNaker machine representation
        :return: a mapping from (x, y, p) to application data base addresses
        """
        next_position_tracker = dict()
        space_available_tracker = dict()
        processor_to_app_data_base_address = dict()

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(placements.placements)),
                                   "Executing data specifications")

        for placement in placements.placements:
            associated_vertex = graph_mapper.get_vertex_from_subvertex(
                placement.subvertex)

            # if the vertex can generate a DSG, call it
            if isinstance(associated_vertex, AbstractDataSpecableVertex):

                data_spec_file_path = \
                    associated_vertex.get_data_spec_file_path(
                        placement.x, placement.y, placement.p, hostname,
                        application_data_runtime_folder)
                app_data_file_path = \
                    associated_vertex.get_application_data_file_path(
                        placement.x, placement.y, placement.p, hostname,
                        application_data_runtime_folder)
                data_spec_reader = FileDataReader(data_spec_file_path)
                data_writer = FileDataWriter(app_data_file_path)

                # locate current memory requirement
                chip = machine.get_chip_at(placement.x, placement.y)
                next_position = chip.sdram.user_base_address
                space_available = chip.sdram.size
                placement_key = (placement.x, placement.y)
                if placement_key in next_position_tracker:
                    next_position = next_position_tracker[placement_key]
                    space_available = space_available_tracker[placement_key]

                # generate a file writer for dse report (app pointer table)
                report_writer = None
                if write_text_specs:
                    new_report_directory = os.path.join(
                        self._report_default_directory,
                        "data_spec_text_files")

                    if not os.path.exists(new_report_directory):
                        os.mkdir(new_report_directory)

                    file_name = "{}_DSE_report_for_{}_{}_{}.txt".format(hostname, placement.x, placement.y, placement.p)
                    report_file_path = os.path.join(new_report_directory, file_name)
                    report_writer = FileDataWriter(report_file_path)

                # generate data spec executor
                host_based_data_spec_executor = DataSpecificationExecutor(
                    data_spec_reader, data_writer, space_available, report_writer
                )

                # update memory calc and run data spec executor
                bytes_used_by_spec = 0
                bytes_written_by_spec = 0
                try:
                    bytes_used_by_spec, bytes_written_by_spec = \
                        host_based_data_spec_executor.execute()
                except DataSpecificationException:
                    logger.error(
                        "Error executing data specification for "
                        "{}".format(associated_vertex))
                    raise

                # update base address mapper
                processor_mapping_key = (placement.x, placement.y, placement.p)
                processor_to_app_data_base_address[processor_mapping_key] = {
                    "start_address": next_position,
                    "memory_used": bytes_used_by_spec,
                    "memory_written": bytes_written_by_spec,
                }

                next_position_tracker[placement_key] = \
                    next_position + bytes_used_by_spec
                space_available_tracker[placement_key] = \
                    space_available - bytes_used_by_spec

            # update the progress bar
            progress_bar.update()

        # close the progress bar
        progress_bar.end()
        return processor_to_app_data_base_address
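
The per-chip bookkeeping in the method above is easy to miss inside the
larger loop: the first core seen on a chip starts at the chip's user base
address, and every later core on the same chip is packed after the space
already used. A minimal sketch of just that allocation logic, with
made-up base address and SDRAM size:

# hypothetical values standing in for chip.sdram.user_base_address and
# chip.sdram.size
USER_BASE_ADDRESS = 0x70000000
SDRAM_SIZE = 8 * 1024 * 1024

next_position_tracker = dict()
space_available_tracker = dict()

def allocate(x, y, bytes_used):
    """ Return the base address for the next block on chip (x, y). """
    key = (x, y)
    next_position = next_position_tracker.get(key, USER_BASE_ADDRESS)
    space_available = space_available_tracker.get(key, SDRAM_SIZE)
    if bytes_used > space_available:
        raise MemoryError("chip ({}, {}) is out of SDRAM".format(x, y))
    next_position_tracker[key] = next_position + bytes_used
    space_available_tracker[key] = space_available - bytes_used
    return next_position

print(hex(allocate(0, 0, 1024)))  # 0x70000000
print(hex(allocate(0, 0, 2048)))  # 0x70000400: cores on a chip stack up
print(hex(allocate(1, 0, 1024)))  # 0x70000000: a new chip starts afresh
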
    def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                       placements, transceiver, routing_infos):
        """
        Get synaptic data for all connections in this Projection from the
        machine.
        :param graph_mapper:
        :param partitioned_graph:
        :param placements:
        :param transceiver:
        :param routing_infos:
        :return:
        """
        if self._stored_synaptic_data_from_machine is None:
            logger.debug("Reading synapse data for edge between {} and {}"
                         .format(self._pre_vertex.label,
                                 self._post_vertex.label))
            timer = None
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                timer = Timer()
                timer.start_timing()

            subedges = \
                graph_mapper.get_partitioned_edges_from_partitionable_edge(
                    self)
            if subedges is None:
                subedges = list()

            synaptic_list = copy.copy(self._synapse_list)
            synaptic_list_rows = synaptic_list.get_rows()
            progress_bar = ProgressBar(
                len(subedges), "progress on reading back synaptic matrix")
            for subedge in subedges:
                n_rows = subedge.get_n_rows(graph_mapper)
                pre_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
                post_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.post_subvertex)

                sub_edge_post_vertex = \
                    graph_mapper.get_vertex_from_subvertex(
                        subedge.post_subvertex)
                rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                    placements, transceiver, subedge.pre_subvertex, n_rows,
                    subedge.post_subvertex,
                    self._synapse_row_io, partitioned_graph,
                    routing_infos, subedge.weight_scales).get_rows()

                for i in range(len(rows)):
                    delay_stage = math.floor(
                        float(i) / float(pre_vertex_slice.n_atoms)) + 1
                    min_delay = (delay_stage *
                                 self.pre_vertex.max_delay_per_neuron)
                    max_delay = (min_delay +
                                 self.pre_vertex.max_delay_per_neuron - 1)
                    synaptic_list_rows[
                        (i % pre_vertex_slice.n_atoms) +
                        pre_vertex_slice.lo_atom].set_slice_values(
                            rows[i], post_vertex_slice, min_delay, max_delay)
                progress_bar.update()
            progress_bar.end()
            self._stored_synaptic_data_from_machine = synaptic_list

            if conf.config.getboolean("Reports", "outputTimesForSections"):
                timer.take_sample()

        return self._stored_synaptic_data_from_machine
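
The row-index arithmetic in the loop above packs successive delay stages
for the same pre-neurons into consecutive blocks of rows: row i holds the
synapses of pre-neuron i % n_atoms at delay stage floor(i / n_atoms) + 1.
A small sketch of that mapping, assuming a hypothetical
max_delay_per_neuron of 16 ticks and a 4-neuron slice:

import math

n_atoms = 4                 # assumed pre-vertex slice size
max_delay_per_neuron = 16   # hypothetical ticks per delay stage

for i in range(8):          # two delay stages' worth of rows
    delay_stage = int(math.floor(float(i) / float(n_atoms))) + 1
    min_delay = delay_stage * max_delay_per_neuron
    max_delay = min_delay + max_delay_per_neuron - 1
    print("row {} -> neuron {}, delays {}..{}".format(
        i, i % n_atoms, min_delay, max_delay))
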
Exemplo n.º 24
0
    def _get_spikes(self, graph_mapper, placements, transceiver,
                    compatible_output, spike_recording_region,
                    sub_vertex_out_spike_bytes_function):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells.   This is read directly from the memory for the board.
        """

        logger.info("Getting spikes for {}".format(self._label))

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting spikes")
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the App Data for the core
            app_data_base_address = \
                transceiver.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, spike_recording_region)
            spike_region_base_address_buf = transceiver.read_memory(
                x, y, spike_region_base_address_offset, 4)
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = transceiver.read_memory(
                x, y, spike_region_base_address, 4)
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region allocated for spikes
            out_spike_bytes = sub_vertex_out_spike_bytes_function(
                subvertex, subvertex_slice)
            size_of_region = self.get_recording_region_size(out_spike_bytes)

            if number_of_bytes_written > size_of_region:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})".format(number_of_bytes_written,
                                                   size_of_region))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(spike_region_base_address)))
            spike_data = transceiver.read_memory(x, y,
                                                 spike_region_base_address + 4,
                                                 number_of_bytes_written)
            numpy_data = numpy.asarray(
                spike_data,
                dtype="uint8").view(dtype="uint32").byteswap().view("uint8")
            bits = numpy.fliplr(
                numpy.unpackbits(numpy_data).reshape((-1, 32))).reshape(
                    (-1, out_spike_bytes * 8))
            times, indices = numpy.where(bits == 1)
            times = times * ms_per_tick
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
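
The spike buffer decoding above is dense: each timestep is stored as a
bit vector, one bit per neuron, in little-endian 32-bit words. The bytes
are therefore word-swapped, unpacked MSB-first, and each 32-bit row is
flipped so that the column index equals the neuron index. A minimal
sketch on a hand-built buffer, assuming one 32-bit word per timestep and
a little-endian host (as the code above also assumes):

import numpy

ms_per_tick = 1.0
out_spike_bytes = 4  # one 32-bit word per timestep in this sketch

# two timesteps: neuron 0 spikes at t=0, neurons 1 and 5 at t=1
words = numpy.array([1 << 0, (1 << 1) | (1 << 5)], dtype="<u4")
spike_data = words.tostring()  # the raw buffer, as read from SDRAM

numpy_data = numpy.asarray(
    bytearray(spike_data),
    dtype="uint8").view(dtype="uint32").byteswap().view("uint8")
bits = numpy.fliplr(
    numpy.unpackbits(numpy_data).reshape((-1, 32))).reshape(
        (-1, out_spike_bytes * 8))
times, indices = numpy.where(bits == 1)
print(list(zip(indices, times * ms_per_tick)))
# [(0, 0.0), (1, 1.0), (5, 1.0)]
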
Exemplo n.º 25
0
    def get_neuron_parameter(self, region, compatible_output, has_ran,
                             graph_mapper, placements, txrx, machine_time_step,
                             runtime):
        if not has_ran:
            raise exceptions.SpynnakerException(
                "The simulation has not yet run, therefore neuron "
                "parameters cannot be retrieved")

        ms_per_tick = self._machine_time_step / 1000.0
        n_timesteps = int(runtime / ms_per_tick)

        tempfilehandle = tempfile.NamedTemporaryFile()
        data = numpy.memmap(tempfilehandle.file,
                            shape=(n_timesteps, self._n_atoms),
                            dtype="float64,float64,float64")
        data["f0"] = (numpy.arange(self._n_atoms * n_timesteps) %
                      self._n_atoms).reshape((n_timesteps, self._n_atoms))
        data["f1"] = numpy.repeat(
            numpy.arange(0, n_timesteps * ms_per_tick, ms_per_tick),
            self._n_atoms).reshape((n_timesteps, self._n_atoms))

        # Find all the sub-vertices that this pynn_population.py exists on
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting recorded data")
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p

            # Get the App Data for the core
            app_data_base_address = txrx.\
                get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the value buffer
            neuron_param_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, region)
            neuron_param_region_base_address_buf = txrx.read_memory(
                x, y, neuron_param_region_base_address_offset, 4)
            neuron_param_region_base_address = struct.unpack_from(
                "<I", neuron_param_region_base_address_buf)[0]
            neuron_param_region_base_address += app_data_base_address

            # Read the size
            number_of_bytes_written_buf = txrx.read_memory(
                x, y, neuron_param_region_base_address, 4)

            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # Read the values
            logger.debug("Reading {} ({}) bytes starting at {}".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(neuron_param_region_base_address + 4)))

            neuron_param_region_data = txrx.read_memory(
                x, y, neuron_param_region_base_address + 4,
                number_of_bytes_written)

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            bytes_per_time_step = vertex_slice.n_atoms * 4

            number_of_time_steps_written = \
                number_of_bytes_written / bytes_per_time_step

            logger.debug(
                "Processing {} timesteps".format(number_of_time_steps_written))

            numpy_data = (numpy.asarray(neuron_param_region_data,
                                        dtype="uint8").view(dtype="<i4") /
                          32767.0).reshape((n_timesteps, vertex_slice.n_atoms))
            data["f2"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data
            progress_bar.update()

        progress_bar.end()
        data.shape = self._n_atoms * n_timesteps

        # Sort the data - apparently, using lexsort is faster, but it might
        # consume more memory, so the option is left open for sort-in-place
        order = numpy.lexsort((data["f1"], data["f0"]))
        # data.sort(order=['f0', 'f1'], axis=0)

        result = data.view(dtype="float64").reshape(
            (self._n_atoms * n_timesteps, 3))[order]
        return result
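
The value decode above treats each recorded word as a signed 32-bit
fixed-point number and divides by 32767.0 to recover the floating-point
parameter, one word per neuron per timestep. A minimal sketch of the
round trip, assuming that scaling convention:

import struct

import numpy

# encode three values the way the core is assumed to store them
values = [-65.0, -50.5, 12.25]
raw = struct.pack("<3i", *[int(v * 32767.0) for v in values])

decoded = (numpy.asarray(bytearray(raw), dtype="uint8")
           .view(dtype="<i4") / 32767.0)
print(decoded)  # approximately [-65.0, -50.5, 12.25], up to rounding
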
Exemplo n.º 26
0
    def run(self, run_time):
        """

        :param run_time:
        :return:
        """
        # convert config params into valid types
        width = config.get("Machine", "width")
        height = config.get("Machine", "height")
        if width == "None":
            width = None
        else:
            width = int(width)
        if height == "None":
            height = None
        else:
            height = int(height)

        number_of_boards = config.get("Machine", "number_of_boards")
        if number_of_boards == "None":
            number_of_boards = None
        else:
            number_of_boards = int(number_of_boards)

        self.setup_interfaces(
            hostname=self._hostname,
            bmp_details=config.get("Machine", "bmp_names"),
            downed_chips=config.get("Machine", "down_chips"),
            downed_cores=config.get("Machine", "down_cores"),
            board_version=config.getint("Machine", "version"),
            number_of_boards=number_of_boards,
            width=width,
            height=height,
            is_virtual=config.getboolean("Machine", "virtual_board"),
            virtual_has_wrap_arounds=config.getboolean(
                "Machine", "requires_wrap_arounds"),
            auto_detect_bmp=config.getboolean("Machine", "auto_detect_bmp"))

        # add extra data needed by the reload script which cannot be given
        # directly
        if self._reports_states.transciever_report:
            self._reload_script.runtime = run_time
            self._reload_script.time_scale_factor = self._time_scale_factor

        # create network report if needed
        if self._reports_states is not None:
            reports.network_specification_partitionable_report(
                self._report_default_directory, self._partitionable_graph,
                self._hostname)

        # calculate number of machine time steps
        if run_time is not None:
            self._no_machine_time_steps =\
                int((run_time * 1000.0) / self._machine_time_step)
            ceiled_machine_time_steps = \
                math.ceil((run_time * 1000.0) / self._machine_time_step)
            if self._no_machine_time_steps != ceiled_machine_time_steps:
                raise common_exceptions.ConfigurationException(
                    "The runtime and machine time step combination result "
                    "in a fractional number of machine time steps, so "
                    "SpiNNaker cannot determine how many steps to run for")
            for vertex in self._partitionable_graph.vertices:
                if isinstance(vertex, AbstractDataSpecableVertex):
                    vertex.set_no_machine_time_steps(
                        self._no_machine_time_steps)
        else:
            self._no_machine_time_steps = None
            logger.warn("You have set a runtime that will never end, this may"
                        "cause the neural models to fail to partition "
                        "correctly")
            for vertex in self._partitionable_graph.vertices:
                if (isinstance(vertex, AbstractPopulationRecordableVertex)
                        and vertex.record):
                    raise common_exceptions.ConfigurationException(
                        "recording a population when set to infinite runtime "
                        "is not currently supportable in this tool chain."
                        "watch this space")

        do_timing = config.getboolean("Reports", "outputTimesForSections")
        if do_timing:
            timer = Timer()
        else:
            timer = None

        self.set_runtime(run_time)
        logger.info("*** Running Mapper *** ")
        if do_timing:
            timer.start_timing()
        self.map_model()
        if do_timing:
            timer.take_sample()

        # add database generation if requested
        needs_database = self._auto_detect_database(self._partitioned_graph)
        user_create_database = config.get("Database", "create_database")
        if ((user_create_database == "None" and needs_database)
                or user_create_database == "True"):

            wait_on_confirmation = config.getboolean("Database",
                                                     "wait_on_confirmation")
            self._database_interface = SpynnakerDataBaseInterface(
                self._app_data_runtime_folder, wait_on_confirmation,
                self._database_socket_addresses)

            self._database_interface.add_system_params(self._time_scale_factor,
                                                       self._machine_time_step,
                                                       self._runtime)
            self._database_interface.add_machine_objects(self._machine)
            self._database_interface.add_partitionable_vertices(
                self._partitionable_graph)
            self._database_interface.add_partitioned_vertices(
                self._partitioned_graph, self._graph_mapper,
                self._partitionable_graph)
            self._database_interface.add_placements(self._placements,
                                                    self._partitioned_graph)
            self._database_interface.add_routing_infos(self._routing_infos,
                                                       self._partitioned_graph)
            self._database_interface.add_routing_tables(self._router_tables)
            self._database_interface.add_tags(self._partitioned_graph,
                                              self._tags)
            execute_mapping = config.getboolean(
                "Database", "create_routing_info_to_neuron_id_mapping")
            if execute_mapping:
                self._database_interface.create_neuron_to_key_mapping(
                    graph_mapper=self._graph_mapper,
                    partitionable_graph=self._partitionable_graph,
                    partitioned_graph=self._partitioned_graph,
                    routing_infos=self._routing_infos)
            # if using a reload script, record whether it needs to wait for
            # confirmation
            if self._reports_states.transciever_report:
                self._reload_script.wait_on_confirmation = wait_on_confirmation
                for socket_address in self._database_socket_addresses:
                    self._reload_script.add_socket_address(socket_address)
            self._database_interface.send_read_notification()

        # execute data spec generation
        if do_timing:
            timer.start_timing()
        logger.info("*** Generating Output *** ")
        logger.debug("")
        executable_targets = self.generate_data_specifications()
        if do_timing:
            timer.take_sample()

        # execute data spec execution
        if do_timing:
            timer.start_timing()
        processor_to_app_data_base_address = \
            self.execute_data_specification_execution(
                config.getboolean("SpecExecution", "specExecOnHost"),
                self._hostname, self._placements, self._graph_mapper,
                write_text_specs=config.getboolean(
                    "Reports", "writeTextSpecs"),
                runtime_application_data_folder=self._app_data_runtime_folder,
                machine=self._machine)

        if self._reports_states is not None:
            reports.write_memory_map_report(
                self._report_default_directory,
                processor_to_app_data_base_address)

        if do_timing:
            timer.take_sample()

        if (not isinstance(self._machine, VirtualMachine)
                and config.getboolean("Execute", "run_simulation")):
            if do_timing:
                timer.start_timing()

            logger.info("*** Loading tags ***")
            self.load_tags(self._tags)

            if self._do_load is True:
                logger.info("*** Loading data ***")
                self._load_application_data(
                    self._placements,
                    self._graph_mapper,
                    processor_to_app_data_base_address,
                    self._hostname,
                    app_data_folder=self._app_data_runtime_folder,
                    verify=config.getboolean("Mode", "verify_writes"))
                self.load_routing_tables(self._router_tables, self._app_id)
                logger.info("*** Loading executables ***")
                self.load_executable_images(executable_targets, self._app_id)
                logger.info("*** Loading buffers ***")
                self.set_up_send_buffering(self._partitioned_graph,
                                           self._placements, self._tags)

            # end of entire loading setup
            if do_timing:
                timer.take_sample()

            if self._do_run is True:
                logger.info("*** Running simulation... *** ")
                if do_timing:
                    timer.start_timing()
                # everything is now in sync0; load the initial buffers
                self._send_buffer_manager.load_initial_buffers()
                if do_timing:
                    timer.take_sample()

                wait_on_confirmation = config.getboolean(
                    "Database", "wait_on_confirmation")
                send_start_notification = config.getboolean(
                    "Database", "send_start_notification")

                self.wait_for_cores_to_be_ready(executable_targets,
                                                self._app_id)

                # wait till external app is ready for us to start if required
                if (self._database_interface is not None
                        and wait_on_confirmation):
                    self._database_interface.wait_for_confirmation()

                self.start_all_cores(executable_targets, self._app_id)

                if (self._database_interface is not None
                        and send_start_notification):
                    self._database_interface.send_start_notification()

                if self._runtime is None:
                    logger.info("Application is set to run forever - exiting")
                else:
                    self.wait_for_execution_to_complete(
                        executable_targets, self._app_id, self._runtime,
                        self._time_scale_factor)
                self._has_ran = True
                if self._retrieve_provance_data:

                    progress = ProgressBar(self._placements.n_placements + 1,
                                           "getting provenance data")

                    # create a folder for the provenance data
                    file_path = os.path.join(self._report_default_directory,
                                             "provenance_data")

                    # create the directory if it doesn't already exist
                    if not os.path.exists(file_path):
                        os.mkdir(file_path)

                    # write the top-level provenance data
                    self.write_provenance_data_in_xml(file_path, self._txrx)
                    progress.update()

                    # retrieve provenance data from any cores that provide data
                    for placement in self._placements.placements:
                        if isinstance(placement.subvertex,
                                      AbstractProvidesProvenanceData):
                            core_file_path = os.path.join(
                                file_path,
                                "Provanence_data_for_{}_{}_{}_{}.xml".format(
                                    placement.subvertex.label, placement.x,
                                    placement.y, placement.p))
                            placement.subvertex.write_provenance_data_in_xml(
                                core_file_path, self.transceiver, placement)
                        progress.update()
                    progress.end()

        elif isinstance(self._machine, VirtualMachine):
            logger.info(
                "*** Using a Virtual Machine so no simulation will occur")
        else:
            logger.info("*** No simulation requested: Stopping. ***")