def get_synaptic_list_from_machine(self, graph_mapper, partitioned_graph,
                                       placements, transceiver, routing_infos):
        """
        Get synaptic data for all connections in this Projection from the
        machine.
        """
        if self._stored_synaptic_data_from_machine is None:
            timer = None
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                timer = Timer()
                timer.start_timing()

            logger.debug(
                "Reading synapse data for edge between {} and {}".format(
                    self._pre_vertex.label, self._post_vertex.label))
            subedges = \
                graph_mapper.get_partitioned_edges_from_partitionable_edge(
                    self)
            if subedges is None:
                subedges = list()

            synaptic_list = copy.copy(self._synapse_list)
            synaptic_list_rows = synaptic_list.get_rows()
            progress_bar = ProgressBar(
                len(subedges), "progress on reading back synaptic matrix")
            for subedge in subedges:
                n_rows = subedge.get_n_rows(graph_mapper)
                pre_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.pre_subvertex)
                post_vertex_slice = \
                    graph_mapper.get_subvertex_slice(subedge.post_subvertex)

                sub_edge_post_vertex = \
                    graph_mapper.get_vertex_from_subvertex(
                        subedge.post_subvertex)
                rows = sub_edge_post_vertex.get_synaptic_list_from_machine(
                    placements, transceiver, subedge.pre_subvertex, n_rows,
                    subedge.post_subvertex, self._synapse_row_io,
                    partitioned_graph, routing_infos,
                    subedge.weight_scales).get_rows()

                for i in range(len(rows)):
                    synaptic_list_rows[
                        i + pre_vertex_slice.lo_atom].set_slice_values(
                            rows[i], vertex_slice=post_vertex_slice)
                progress_bar.update()
            progress_bar.end()
            self._stored_synaptic_data_from_machine = synaptic_list
            if conf.config.getboolean("Reports", "outputTimesForSections"):
                timer.take_sample()

        return self._stored_synaptic_data_from_machine
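
The loop above stitches each sub-edge's rows back into the full synaptic
list at the pre-vertex slice offset. A minimal sketch of that indexing
pattern, with plain lists standing in for the SynapticList rows:

# Sketch only: full_rows stands in for synaptic_list_rows, block for the
# rows read back from one sub-edge, lo_atom for pre_vertex_slice.lo_atom.
full_rows = [None] * 100
block = ["row-a", "row-b"]
lo_atom = 10
for i, row in enumerate(block):
    # the real code merges column slices here via set_slice_values()
    full_rows[i + lo_atom] = row
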
Example 2
    def generate_data_specifications(self):
        """ generates the dsg for the graph.

        :return:
        """

        # iterate through the subvertices and call generate_data_spec for
        # each vertex
        executable_targets = ExecutableTargets()

        # create a progress bar for end users
        progress_bar = ProgressBar(len(list(self._placements.placements)),
                                   "on generating data specifications")
        for placement in self._placements.placements:
            associated_vertex =\
                self._graph_mapper.get_vertex_from_subvertex(
                    placement.subvertex)

            # if the vertex can generate a DSG, call it
            if isinstance(associated_vertex, AbstractDataSpecableVertex):

                ip_tags = self._tags.get_ip_tags_for_vertex(
                    placement.subvertex)
                reverse_ip_tags = self._tags.get_reverse_ip_tags_for_vertex(
                    placement.subvertex)
                associated_vertex.generate_data_spec(
                    placement.subvertex, placement, self._partitioned_graph,
                    self._partitionable_graph, self._routing_infos,
                    self._hostname, self._graph_mapper,
                    self._report_default_directory, ip_tags, reverse_ip_tags,
                    self._writeTextSpecs, self._app_data_runtime_folder)
                progress_bar.update()

                # Get name of binary from vertex
                binary_name = associated_vertex.get_binary_file_name()

                # Attempt to find this within search paths
                binary_path = executable_finder.get_executable_path(
                    binary_name)
                if binary_path is None:
                    raise exceptions.ExecutableNotFoundException(binary_name)

                if not executable_targets.has_binary(binary_path):
                    executable_targets.add_binary(binary_path)
                executable_targets.add_processor(binary_path, placement.x,
                                                 placement.y, placement.p)

        # finish the progress bar
        progress_bar.end()

        return executable_targets
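
ExecutableTargets above is essentially a multimap from binary path to the
cores that will run that binary. A dict-based stand-in (illustration only;
the real class comes from the SpiNNaker tool chain) shows the shape of the
bookkeeping:

# Illustrative stand-in for ExecutableTargets: binary path -> list of cores.
targets = {}

def add_processor(targets, binary_path, x, y, p):
    # collapses has_binary()/add_binary()/add_processor() into one step
    targets.setdefault(binary_path, []).append((x, y, p))

add_processor(targets, "/abs/path/to/model.aplx", 0, 0, 1)
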
Example 3
    def run(self, subgraph, graph_mapper):
        new_sub_graph = PartitionedGraph(label=subgraph.label)
        new_graph_mapper = GraphMapper(graph_mapper.first_graph_label,
                                       subgraph.label)

        # create progress bar
        progress_bar = \
            ProgressBar(len(subgraph.subvertices) + len(subgraph.subedges),
                        "on checking which subedges are filterable given "
                        "heuristics")

        # add the subvertices directly, as they won't be pruned.
        for subvert in subgraph.subvertices:
            new_sub_graph.add_subvertex(subvert)
            associated_vertex = graph_mapper.get_vertex_from_subvertex(subvert)
            vertex_slice = graph_mapper.get_subvertex_slice(subvert)
            new_graph_mapper.add_subvertex(subvertex=subvert,
                                           vertex_slice=vertex_slice,
                                           vertex=associated_vertex)
            progress_bar.update()

        # check each subedge to decide whether it needs pruning
        for subedge in subgraph.subedges:
            if not self._is_filterable(subedge, graph_mapper):
                logger.debug("this subedge was not pruned {}".format(subedge))
                new_sub_graph.add_subedge(subedge)
                associated_edge = graph_mapper.\
                    get_partitionable_edge_from_partitioned_edge(subedge)
                new_graph_mapper.add_partitioned_edge(subedge, associated_edge)
            else:
                logger.debug("this subedge was pruned {}".format(subedge))
            progress_bar.update()
        progress_bar.end()

        # return the pruned partitioned_graph and graph_mapper
        return new_sub_graph, new_graph_mapper
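
_is_filterable is defined elsewhere in this class. One plausible shape for
such a predicate (hypothetical, for illustration only) is to prune a
sub-edge when its partitionable edge provably carries no connections:

    # Hypothetical sketch of the pruning predicate; is_connected() is an
    # assumed helper on the edge, not a confirmed API.
    def _is_filterable(self, subedge, graph_mapper):
        edge = graph_mapper.\
            get_partitionable_edge_from_partitioned_edge(subedge)
        return hasattr(edge, "is_connected") and not edge.is_connected()
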
Example 4
    def _get_spikes(self, graph_mapper, placements, transceiver,
                    compatible_output, spike_recording_region,
                    sub_vertex_out_spike_bytes_function):
        """
        Return a 2-column numpy array containing cell ids and spike times for
        recorded cells. This is read directly from the memory of the board.
        """

        logger.info("Getting spikes for {}".format(self._label))

        spike_times = list()
        spike_ids = list()
        ms_per_tick = self._machine_time_step / 1000.0

        # Find all the sub-vertices that this population has been split into
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting spikes")
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)
            lo_atom = subvertex_slice.lo_atom
            hi_atom = subvertex_slice.hi_atom

            logger.debug("Reading spikes from chip {}, {}, core {}, "
                         "lo_atom {} hi_atom {}".format(
                             x, y, p, lo_atom, hi_atom))

            # Get the application data base address for the core
            app_data_base_address = \
                transceiver.get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the spike buffer
            spike_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, spike_recording_region)
            spike_region_base_address_buf = transceiver.read_memory(
                x, y, spike_region_base_address_offset, 4)
            spike_region_base_address = struct.unpack_from(
                "<I", spike_region_base_address_buf)[0]
            spike_region_base_address += app_data_base_address

            # Read the spike data size
            number_of_bytes_written_buf = transceiver.read_memory(
                x, y, spike_region_base_address, 4)
            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # check that the number of bytes written is no larger than the
            # size of the memory region we allocated for spikes
            out_spike_bytes = sub_vertex_out_spike_bytes_function(
                subvertex, subvertex_slice)
            size_of_region = self.get_recording_region_size(out_spike_bytes)

            if number_of_bytes_written > size_of_region:
                raise exceptions.MemReadException(
                    "the amount of memory written ({}) was larger than was "
                    "allocated for it ({})".format(number_of_bytes_written,
                                                   size_of_region))

            # Read the spikes
            logger.debug("Reading {} ({}) bytes starting at {} + 4".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(spike_region_base_address)))
            spike_data = transceiver.read_memory(x, y,
                                                 spike_region_base_address + 4,
                                                 number_of_bytes_written)
            numpy_data = numpy.asarray(
                spike_data,
                dtype="uint8").view(dtype="uint32").byteswap().view("uint8")
            bits = numpy.fliplr(
                numpy.unpackbits(numpy_data).reshape((-1, 32))).reshape(
                    (-1, out_spike_bytes * 8))
            times, indices = numpy.where(bits == 1)
            times = times * ms_per_tick
            indices = indices + lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
            progress_bar.update()

        progress_bar.end()
        spike_ids = numpy.hstack(spike_ids)
        spike_times = numpy.hstack(spike_times)
        result = numpy.dstack((spike_ids, spike_times))[0]
        return result[numpy.lexsort((spike_times, spike_ids))]
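
The numpy pipeline above turns little-endian spike words into
(timestep, atom) pairs: byteswap puts each word's most significant byte
first, unpackbits expands MSB-first, and fliplr restores bit 0 to column 0.
A self-contained check on synthetic data (two timesteps, one 32-bit word
each; assumes a little-endian host, as the "<I" unpacking above does):

import numpy

# timestep 0 fires atom 3; timestep 1 fires atoms 0 and 31
out_spike_bytes = 4
words = numpy.array([1 << 3, (1 << 0) | (1 << 31)], dtype="<u4")
raw = words.tobytes()  # what transceiver.read_memory() would return

numpy_data = numpy.frombuffer(
    raw, dtype="uint8").view("uint32").byteswap().view("uint8")
bits = numpy.fliplr(numpy.unpackbits(numpy_data).reshape((-1, 32))).reshape(
    (-1, out_spike_bytes * 8))
times, indices = numpy.where(bits == 1)
print(times, indices)  # -> [0 1 1] [ 3  0 31]
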
Example 5
    def get_neuron_parameter(self, region, compatible_output, has_ran,
                             graph_mapper, placements, txrx, machine_time_step,
                             runtime):
        if not has_ran:
            raise exceptions.SpynnakerException(
                "The simulation has not yet ran, therefore neuron param "
                "cannot be retrieved")

        ms_per_tick = self._machine_time_step / 1000.0
        n_timesteps = int(runtime / ms_per_tick)  # memmap shape needs ints

        tempfilehandle = tempfile.NamedTemporaryFile()
        data = numpy.memmap(tempfilehandle.file,
                            shape=(n_timesteps, self._n_atoms),
                            dtype="float64,float64,float64")
        data["f0"] = (numpy.arange(self._n_atoms * n_timesteps) %
                      self._n_atoms).reshape((n_timesteps, self._n_atoms))
        data["f1"] = numpy.repeat(
            numpy.arange(0, n_timesteps * ms_per_tick, ms_per_tick),
            self._n_atoms).reshape((n_timesteps, self._n_atoms))
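        # For example, with n_atoms=2, n_timesteps=3 and 1 ms ticks:
        #   f0 = [[0, 1], [0, 1], [0, 1]]       (cell id, per column)
        #   f1 = [[0., 0.], [1., 1.], [2., 2.]] (time in ms, per row)
        # f2 (the recorded value) is filled in per sub-vertex below.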

        # Find all the sub-vertices that this population has been split into
        subvertices = graph_mapper.get_subvertices_from_vertex(self)
        progress_bar = ProgressBar(len(subvertices), "Getting recorded data")
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            (x, y, p) = placement.x, placement.y, placement.p

            # Get the application data base address for the core
            app_data_base_address = txrx.\
                get_cpu_information_from_core(x, y, p).user[0]

            # Get the position of the value buffer
            neuron_param_region_base_address_offset = \
                dsg_utility_calls.get_region_base_address_offset(
                    app_data_base_address, region)
            neuron_param_region_base_address_buf = txrx.read_memory(
                x, y, neuron_param_region_base_address_offset, 4)
            neuron_param_region_base_address = struct.unpack_from(
                "<I", neuron_param_region_base_address_buf)[0]
            neuron_param_region_base_address += app_data_base_address

            # Read the size
            number_of_bytes_written_buf = txrx.read_memory(
                x, y, neuron_param_region_base_address, 4)

            number_of_bytes_written = struct.unpack_from(
                "<I", number_of_bytes_written_buf)[0]

            # Read the values
            logger.debug("Reading {} ({}) bytes starting at {}".format(
                number_of_bytes_written, hex(number_of_bytes_written),
                hex(neuron_param_region_base_address + 4)))

            neuron_param_region_data = txrx.read_memory(
                x, y, neuron_param_region_base_address + 4,
                number_of_bytes_written)

            vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            bytes_per_time_step = vertex_slice.n_atoms * 4

            number_of_time_steps_written = \
                number_of_bytes_written / bytes_per_time_step

            logger.debug(
                "Processing {} timesteps".format(number_of_time_steps_written))

            numpy_data = (numpy.asarray(neuron_param_region_data,
                                        dtype="uint8").view(dtype="<i4") /
                          32767.0).reshape((n_timesteps, vertex_slice.n_atoms))
            data["f2"][:, vertex_slice.lo_atom:vertex_slice.hi_atom + 1] =\
                numpy_data
            progress_bar.update()

        progress_bar.end()
        data.shape = self._n_atoms * n_timesteps

        # Sort the data - apparently, using lexsort is faster, but it might
        # consume more memory, so the option is left open for sort-in-place
        order = numpy.lexsort((data["f1"], data["f0"]))
        # data.sort(order=['f0', 'f1'], axis=0)

        result = data.view(dtype="float64").reshape(
            (self._n_atoms * n_timesteps, 3))[order]
        return result
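
numpy.lexsort sorts by its last key first, so the call above orders rows
primarily by cell id (f0) and secondarily by time (f1). A small
demonstration:

import numpy

ids = numpy.array([1.0, 0.0, 1.0, 0.0])
times = numpy.array([0.0, 1.0, 1.0, 0.0])
order = numpy.lexsort((times, ids))  # id is the primary sort key
print(order)  # -> [3 1 0 2]: (id 0, t 0), (id 0, t 1), (id 1, t 0), (id 1, t 1)
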
Example 6
    def run(self, run_time):
        """

        :param run_time:
        :return:
        """
        # convert config params into valid types
        width = config.get("Machine", "width")
        height = config.get("Machine", "height")
        if width == "None":
            width = None
        else:
            width = int(width)
        if height == "None":
            height = None
        else:
            height = int(height)

        number_of_boards = config.get("Machine", "number_of_boards")
        if number_of_boards == "None":
            number_of_boards = None
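        # The "None"-string checks above could be factored into a helper,
        # e.g. (hypothetical, not part of the tool chain):
        #     def _optional_int(cfg, section, option):
        #         value = cfg.get(section, option)
        #         return None if value == "None" else int(value)
        # Note that number_of_boards is deliberately left as a string when
        # it is set.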

        self.setup_interfaces(
            hostname=self._hostname,
            bmp_details=config.get("Machine", "bmp_names"),
            downed_chips=config.get("Machine", "down_chips"),
            downed_cores=config.get("Machine", "down_cores"),
            board_version=config.getint("Machine", "version"),
            number_of_boards=number_of_boards,
            width=width,
            height=height,
            is_virtual=config.getboolean("Machine", "virtual_board"),
            virtual_has_wrap_arounds=config.getboolean(
                "Machine", "requires_wrap_arounds"),
            auto_detect_bmp=config.getboolean("Machine", "auto_detect_bmp"))

        # add extra data needed by the reload script which cannot be passed
        # to it directly.
        if self._reports_states.transciever_report:
            self._reload_script.runtime = run_time
            self._reload_script.time_scale_factor = self._time_scale_factor

        # create network report if needed
        if self._reports_states is not None:
            reports.network_specification_partitionable_report(
                self._report_default_directory, self._partitionable_graph,
                self._hostname)

        # calculate number of machine time steps
        if run_time is not None:
            self._no_machine_time_steps =\
                int((run_time * 1000.0) / self._machine_time_step)
            ceiled_machine_time_steps = \
                math.ceil((run_time * 1000.0) / self._machine_time_step)
            if self._no_machine_time_steps != ceiled_machine_time_steps:
                raise common_exceptions.ConfigurationException(
                    "The runtime and machine time step combination result in "
                    "a factional number of machine runable time steps and "
                    "therefore spinnaker cannot determine how many to run for")
            for vertex in self._partitionable_graph.vertices:
                if isinstance(vertex, AbstractDataSpecableVertex):
                    vertex.set_no_machine_time_steps(
                        self._no_machine_time_steps)
        else:
            self._no_machine_time_steps = None
            logger.warn("You have set a runtime that will never end; this "
                        "may cause the neural models to fail to partition "
                        "correctly")
            for vertex in self._partitionable_graph.vertices:
                if (isinstance(vertex, AbstractPopulationRecordableVertex)
                        and vertex.record):
                    raise common_exceptions.ConfigurationException(
                        "recording a population when set to infinite runtime "
                        "is not currently supportable in this tool chain."
                        "watch this space")

        do_timing = config.getboolean("Reports", "outputTimesForSections")
        if do_timing:
            timer = Timer()
        else:
            timer = None

        self.set_runtime(run_time)
        logger.info("*** Running Mapper *** ")
        if do_timing:
            timer.start_timing()
        self.map_model()
        if do_timing:
            timer.take_sample()

        # add database generation if requested
        needs_database = self._auto_detect_database(self._partitioned_graph)
        user_create_database = config.get("Database", "create_database")
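        # create_database is effectively tri-state: "True" forces a database,
        # "False" suppresses one, and "None" defers to the auto-detection
        # above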
        if ((user_create_database == "None" and needs_database)
                or user_create_database == "True"):

            wait_on_confirmation = config.getboolean("Database",
                                                     "wait_on_confirmation")
            self._database_interface = SpynnakerDataBaseInterface(
                self._app_data_runtime_folder, wait_on_confirmation,
                self._database_socket_addresses)

            self._database_interface.add_system_params(self._time_scale_factor,
                                                       self._machine_time_step,
                                                       self._runtime)
            self._database_interface.add_machine_objects(self._machine)
            self._database_interface.add_partitionable_vertices(
                self._partitionable_graph)
            self._database_interface.add_partitioned_vertices(
                self._partitioned_graph, self._graph_mapper,
                self._partitionable_graph)
            self._database_interface.add_placements(self._placements,
                                                    self._partitioned_graph)
            self._database_interface.add_routing_infos(self._routing_infos,
                                                       self._partitioned_graph)
            self._database_interface.add_routing_tables(self._router_tables)
            self._database_interface.add_tags(self._partitioned_graph,
                                              self._tags)
            execute_mapping = config.getboolean(
                "Database", "create_routing_info_to_neuron_id_mapping")
            if execute_mapping:
                self._database_interface.create_neuron_to_key_mapping(
                    graph_mapper=self._graph_mapper,
                    partitionable_graph=self._partitionable_graph,
                    partitioned_graph=self._partitioned_graph,
                    routing_infos=self._routing_infos)
            # if using a reload script, record whether it needs to wait for
            # confirmation
            if self._reports_states.transciever_report:
                self._reload_script.wait_on_confirmation = wait_on_confirmation
                for socket_address in self._database_socket_addresses:
                    self._reload_script.add_socket_address(socket_address)
            self._database_interface.send_read_notification()

        # execute data spec generation
        if do_timing:
            timer.start_timing()
        logger.info("*** Generating Output *** ")
        logger.debug("")
        executable_targets = self.generate_data_specifications()
        if do_timing:
            timer.take_sample()

        # execute the data specifications
        if do_timing:
            timer.start_timing()
        processor_to_app_data_base_address = \
            self.execute_data_specification_execution(
                config.getboolean("SpecExecution", "specExecOnHost"),
                self._hostname, self._placements, self._graph_mapper,
                write_text_specs=config.getboolean(
                    "Reports", "writeTextSpecs"),
                runtime_application_data_folder=self._app_data_runtime_folder,
                machine=self._machine)

        if self._reports_states is not None:
            reports.write_memory_map_report(
                self._report_default_directory,
                processor_to_app_data_base_address)

        if do_timing:
            timer.take_sample()

        if (not isinstance(self._machine, VirtualMachine)
                and config.getboolean("Execute", "run_simulation")):
            if do_timing:
                timer.start_timing()

            logger.info("*** Loading tags ***")
            self.load_tags(self._tags)

            if self._do_load is True:
                logger.info("*** Loading data ***")
                self._load_application_data(
                    self._placements,
                    self._graph_mapper,
                    processor_to_app_data_base_address,
                    self._hostname,
                    app_data_folder=self._app_data_runtime_folder,
                    verify=config.getboolean("Mode", "verify_writes"))
                self.load_routing_tables(self._router_tables, self._app_id)
                logger.info("*** Loading executables ***")
                self.load_executable_images(executable_targets, self._app_id)
                logger.info("*** Loading buffers ***")
                self.set_up_send_buffering(self._partitioned_graph,
                                           self._placements, self._tags)

            # end of entire loading setup
            if do_timing:
                timer.take_sample()

            if self._do_run is True:
                logger.info("*** Running simulation... *** ")
                if do_timing:
                    timer.start_timing()
                # everything is now in the SYNC0 state; load the initial
                # buffers
                self._send_buffer_manager.load_initial_buffers()
                if do_timing:
                    timer.take_sample()

                wait_on_confirmation = config.getboolean(
                    "Database", "wait_on_confirmation")
                send_start_notification = config.getboolean(
                    "Database", "send_start_notification")

                self.wait_for_cores_to_be_ready(executable_targets,
                                                self._app_id)

                # wait until the external app is ready for us to start,
                # if required
                if (self._database_interface is not None
                        and wait_on_confirmation):
                    self._database_interface.wait_for_confirmation()

                self.start_all_cores(executable_targets, self._app_id)

                if (self._database_interface is not None
                        and send_start_notification):
                    self._database_interface.send_start_notification()

                if self._runtime is None:
                    logger.info("Application is set to run forever - exiting")
                else:
                    self.wait_for_execution_to_complete(
                        executable_targets, self._app_id, self._runtime,
                        self._time_scale_factor)
                self._has_ran = True
                if self._retrieve_provance_data:

                    progress = ProgressBar(self._placements.n_placements + 1,
                                           "getting provenance data")

                    # retrieve the system-level provenance data
                    file_path = os.path.join(self._report_default_directory,
                                             "provance_data")

                    # create the directory if it doesn't already exist
                    if not os.path.exists(file_path):
                        os.mkdir(file_path)

                    # write the provenance data
                    self.write_provenance_data_in_xml(file_path, self._txrx)
                    progress.update()

                    # retrieve provenance data from any cores that provide data
                    for placement in self._placements.placements:
                        if isinstance(placement.subvertex,
                                      AbstractProvidesProvenanceData):
                            core_file_path = os.path.join(
                                file_path,
                                "Provanence_data_for_{}_{}_{}_{}.xml".format(
                                    placement.subvertex.label, placement.x,
                                    placement.y, placement.p))
                            placement.subvertex.write_provenance_data_in_xml(
                                core_file_path, self.transceiver, placement)
                        progress.update()
                    progress.end()

        elif isinstance(self._machine, VirtualMachine):
            logger.info(
                "*** Using a Virtual Machine so no simulation will occur")
        else:
            logger.info("*** No simulation requested: Stopping. ***")