def get_provenance_data_from_machine(self, transceiver, placement):
    """ Get the extra provenance counters recorded by this vertex and
        translate them into provenance data items.

    :param transceiver: spinnman interface to the machine
    :param placement: the location of this vertex on the machine
    :return: list of provenance data items
    """
    raw = self._read_provenance_data(transceiver, placement)
    items = self._read_basic_provenance_items(raw, placement)
    extra = self._get_remaining_provenance_data_items(raw)

    # pull the individual counters out of the extra data words
    entries = self.EXTRA_PROVENANCE_DATA_ENTRIES
    n_saturations = extra[entries.SATURATION_COUNT.value]
    n_buffer_overflows = extra[entries.BUFFER_OVERFLOW_COUNT.value]
    n_pre_synaptic_events = extra[entries.PRE_SYNAPTIC_EVENT_COUNT.value]
    last_timer_tick = extra[entries.CURRENT_TIMER_TIC.value]

    label, x, y, p, names = self._get_placement_details(placement)

    # translate into provenance data items
    items.append(ProvenanceDataItem(
        self._add_name(names, "Times_synaptic_weights_have_saturated"),
        n_saturations,
        report=n_saturations > 0,
        message=(
            "The weights from the synapses for {} on {}, {}, {} saturated "
            "{} times. If this causes issues you can increase the "
            "spikes_per_second and / or ring_buffer_sigma "
            "values located within the .spynnaker.cfg file.".format(
                label, x, y, p, n_saturations))))
    items.append(ProvenanceDataItem(
        self._add_name(names, "Times_the_input_buffer_lost_packets"),
        n_buffer_overflows,
        report=n_buffer_overflows > 0,
        message=(
            "The input buffer for {} on {}, {}, {} lost packets on {} "
            "occasions. This is often a sign that the system is running "
            "too quickly for the number of neurons per core.  Please "
            "increase the timer_tic or time_scale_factor or decrease the "
            "number of neurons per core.".format(
                label, x, y, p, n_buffer_overflows))))
    items.append(ProvenanceDataItem(
        self._add_name(names, "Total_pre_synaptic_events"),
        n_pre_synaptic_events))
    items.append(ProvenanceDataItem(
        self._add_name(names, "Last_timer_tic_the_core_ran_to"),
        last_timer_tick))
    return items
Example #2
0
    def __call__(
            self, transceiver, machine, router_tables, has_ran,
            provenance_data_objects=None):
        """ Gather router provenance from the machine and summarise it.

        :param transceiver: the SpiNNMan interface object
        :param machine: the python representation of the spinnaker machine
        :param router_tables: the router tables that have been generated
        :param has_ran: token that states that the simulation has ran
        :param provenance_data_objects: optional existing list of provenance\
            items to append to; a new list is used when None
        :return: dict with key 'prov_items' mapped to the provenance items
        """
        if not has_ran:
            raise exceptions.ConfigurationException(
                "This function has been called before the simulation has ran."
                " This is deemed an error, please rectify and try again")

        # machine-wide running totals, accumulated while each router's
        # diagnostics are read below
        self._total_sent_packets = 0
        self._total_new_packets = 0
        self._total_dropped_packets = 0
        self._total_missed_dropped_packets = 0
        self._total_lost_dropped_packets = 0

        prov_items = (provenance_data_objects
                      if provenance_data_objects is not None else list())

        prov_items.extend(self._write_router_provenance_data(
            router_tables, machine, transceiver))

        # record the totals accumulated by the per-router pass above
        for title, total in (
                ("total_sent_packets", self._total_sent_packets),
                ("total_created_packets", self._total_new_packets),
                ("total_dropped_packets", self._total_dropped_packets),
                ("total_missed_dropped_packets",
                 self._total_missed_dropped_packets),
                ("total_lost_dropped_packets",
                 self._total_lost_dropped_packets)):
            prov_items.append(ProvenanceDataItem(
                ["router_provenance", title], total))

        return {'prov_items': prov_items}
    def get_provenance_data_from_machine(self, transceiver, placement):
        """ Get provenance from the machine

        Fix: the two report messages were swapped relative to the item
        names — ``lost_packets_without_payload`` described packets "which
        have payloads" and vice versa. Each message now matches its name.

        :param transceiver: spinnman interface to the machine
        :param placement: the location of this vertex on the machine
        :return: list of provenance data items
        """
        provenance_data = self._read_provenance_data(transceiver, placement)
        provenance_items = self._read_basic_provenance_items(
            provenance_data, placement)
        provenance_data = self._get_remaining_provenance_data_items(
            provenance_data)
        _, _, _, _, names = self._get_placement_details(placement)

        # provenance_data[0]: packets lost that did NOT carry a payload
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "lost_packets_without_payload"),
            provenance_data[0],
            report=provenance_data[0] > 0,
            message=(
                "The live packet gatherer has lost {} packets which do not "
                "have payloads during its execution. Try increasing the "
                "machine time step or increasing the time scale factor. If "
                "you are running in real time, try reducing the number of "
                "vertices which are feeding this live packet gatherer".format(
                    provenance_data[0]))))
        # provenance_data[1]: packets lost that DID carry a payload
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "lost_packets_with_payload"),
            provenance_data[1],
            report=provenance_data[1] > 0,
            message=(
                "The live packet gatherer has lost {} packets which have "
                "payloads during its execution. Try increasing the machine "
                "time step or increasing the time scale factor. If you are "
                "running in real time, try reducing the number of vertices "
                "which are feeding this live packet gatherer".format(
                    provenance_data[1]))))

        return provenance_items
Example #4
0
 def get_provenance_data(self):
     """ Report how often this connector's synaptic delays were clipped.

     :return: list with a single provenance data item
     """
     connector = self.__class__.__name__
     name = "{}_{}_{}".format(self._pre_population.label,
                              self._post_population.label,
                              connector)
     clipped = self._n_clipped_delays
     return [ProvenanceDataItem(
         [name, "Times_synaptic_delays_got_clipped"],
         clipped,
         report=clipped > 0,
         message=(
             "The delays in the connector {} from {} to {} was clipped "
             "to {} a total of {} times.  This can be avoided by reducing "
             "the timestep or increasing the minimum delay to one "
             "timestep".format(connector,
                               self._pre_population.label,
                               self._post_population.label,
                               self._min_delay, clipped)))]
    def get_provenance_data_from_machine(self, transceiver, placement):
        """ Get the delay extension's extra provenance counters and
            translate them into provenance data items.

        :param transceiver: spinnman interface to the machine
        :param placement: the location of this vertex on the machine
        :return: list of provenance data items
        """
        raw = self._read_provenance_data(transceiver, placement)
        items = self._read_basic_provenance_items(raw, placement)
        extra = self._get_remaining_provenance_data_items(raw)

        # pull the individual counters out of the extra data words
        entries = self.EXTRA_PROVENANCE_DATA_ENTRIES
        n_packets_received = extra[entries.N_PACKETS_RECEIVED.value]
        n_packets_processed = extra[entries.N_PACKETS_PROCESSED.value]
        n_packets_added = extra[entries.N_PACKETS_ADDED.value]
        n_packets_sent = extra[entries.N_PACKETS_SENT.value]
        n_buffer_overflows = extra[entries.N_BUFFER_OVERFLOWS.value]
        n_delays = extra[entries.N_DELAYS.value]

        label, x, y, p, names = self._get_placement_details(placement)

        # translate into provenance data items
        items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_packets_received"),
            n_packets_received))
        items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_packets_processed"),
            n_packets_processed,
            # every received packet should have been processed
            report=n_packets_received != n_packets_processed,
            message=(
                "The delay extension {} on {}, {}, {} only processed {} of {}"
                " received packets.  This could indicate a fault.".format(
                    label, x, y, p, n_packets_processed, n_packets_received))))
        items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_packets_added_to_delay_slot"),
            n_packets_added,
            # every processed packet should have been added to a delay slot
            report=n_packets_added != n_packets_processed,
            message=(
                "The delay extension {} on {}, {}, {} only added {} of {}"
                " processed packets.  This could indicate a routing or"
                " filtering fault".format(
                    label, x, y, p, n_packets_added, n_packets_processed))))
        items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_packets_sent"),
            n_packets_sent))
        items.append(ProvenanceDataItem(
            self._add_name(names, "Times_the_input_buffer_lost_packets"),
            n_buffer_overflows,
            report=n_buffer_overflows > 0,
            message=(
                "The input buffer for {} on {}, {}, {} lost packets on {} "
                "occasions. This is often a sign that the system is running "
                "too quickly for the number of neurons per core.  Please "
                "increase the timer_tic or time_scale_factor or decrease the "
                "number of neurons per core.".format(
                    label, x, y, p, n_buffer_overflows))))
        items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_times_delayed_to_spread_traffic"),
            n_delays))
        return items
Example #6
0
    def _read_basic_provenance_items(self, provenance_data, placement):
        """ Build provenance items for the counters common to all vertices.

        :param provenance_data: the raw provenance words read from the machine
        :param placement: where the vertex sits; used to label the items
        :return: list of provenance data items
        """
        entries = self.PROVENANCE_DATA_ENTRIES
        tx_overflows = provenance_data[
            entries.TRANSMISSION_EVENT_OVERFLOW.value]
        callback_overloads = provenance_data[
            entries.CALLBACK_QUEUE_OVERLOADED.value]
        dma_overloads = provenance_data[
            entries.DMA_QUEUE_OVERLOADED.value]
        timer_overruns = provenance_data[
            entries.TIMER_TIC_HAS_OVERRUN.value]
        max_timer_overrun = provenance_data[
            entries.MAX_NUMBER_OF_TIMER_TIC_OVERRUN.value]

        # create provenance data items for returning
        label, x, y, p, names = self._get_placement_details(placement)
        return [
            ProvenanceDataItem(
                self._add_name(
                    names, "Times_the_transmission_of_spikes_overran"),
                tx_overflows,
                report=tx_overflows != 0,
                message=(
                    "The transmission buffer for {} on {}, {}, {} was blocked "
                    "on {} occasions. This is often a sign that the system is "
                    "experiencing back pressure from the communication fabric. "
                    "Please either: "
                    "1. spread the load over more cores, "
                    "2. reduce your peak transmission load,"
                    "3. adjust your mapping algorithm.".format(
                        label, x, y, p, tx_overflows))),
            ProvenanceDataItem(
                self._add_name(
                    names, "Times_the_callback_queue_was_overloaded"),
                callback_overloads,
                report=callback_overloads != 0,
                message=(
                    "The callback queue for {} on {}, {}, {} overloaded on {} "
                    "occasions. This is often a sign that the system is running "
                    "too quickly for the number of neurons per core.  Please "
                    "increase the machine time step or time_scale_factor or "
                    "decrease the number of neurons per core.".format(
                        label, x, y, p, callback_overloads))),
            ProvenanceDataItem(
                self._add_name(names, "Times_the_dma_queue_was_overloaded"),
                dma_overloads,
                report=dma_overloads != 0,
                message=(
                    "The DMA queue for {} on {}, {}, {} overloaded on {} "
                    "occasions. This is often a sign that the system is running "
                    "too quickly for the number of neurons per core.  Please "
                    "increase the machine time step or time_scale_factor or "
                    "decrease the number of neurons per core.".format(
                        label, x, y, p, dma_overloads))),
            ProvenanceDataItem(
                self._add_name(names, "Times_the_timer_tic_over_ran"),
                timer_overruns,
                # a handful of overruns (up to 4) are tolerated silently
                report=timer_overruns > 4,
                message=(
                    "A Timer tick callback was still executing when the next "
                    "timer tick callback was fired off for {} on {}, {}, {}, {} "
                    "times. This is a sign of the system being overloaded and "
                    "therefore the results are likely incorrect.  Please increase "
                    "the machine time step or time_scale_factor or decrease the "
                    "number of neurons per core".format(
                        label, x, y, p, timer_overruns))),
            ProvenanceDataItem(
                self._add_name(
                    names, "max_number_of_times_timer_tic_over_ran"),
                max_timer_overrun,
                # same tolerance as the overrun count above
                report=max_timer_overrun > 4,
                message=(
                    "The timer for {} on {}, {}, {} fell behind by up to {} "
                    "ticks. This is a sign of the system being overloaded and "
                    "therefore the results are likely incorrect. Please increase "
                    "the machine time step or time_scale_factor or decrease the "
                    "number of neurons per core".format(
                        label, x, y, p, max_timer_overrun))),
        ]
 def extract_provenance(self, executor):
     """ Record the run time of each algorithm the executor ran. """
     for algorithm, run_time in executor.algorithm_timings:
         item_names = ["pacman", "run_time_of_{}".format(algorithm)]
         self._data_items.append(
             ProvenanceDataItem(item_names, str(run_time)))
Example #8
0
    def _write_router_diagnostics(
            self, x, y, router_diagnostic, reinjector_status, expected):
        """ Stores router diagnostics as a set of provenance data items

        :param x: x coord of the router in question
        :param y: y coord of the router in question
        :param router_diagnostic: the router diagnostic object
        :param reinjector_status: the data gained from the reinjector, or\
            None if no reinjector data is available for this chip
        :param expected: whether this router was expected to carry traffic;\
            selects which provenance category the items are filed under
        :return: the list of provenance data items created
        """
        names = ["router_provenance"]
        names.append("expected_routers" if expected else "unexpected_routers")
        names.append("router_at_chip_{}_{}".format(x, y))

        items = list()

        def add_count(title, count):
            # plain counter: recorded as a string, never flagged for report
            items.append(ProvenanceDataItem(
                self._add_name(names, title), str(count)))

        add_count("Local_Multicast_Packets",
                  router_diagnostic.n_local_multicast_packets)
        add_count("External_Multicast_Packets",
                  router_diagnostic.n_external_multicast_packets)
        # dropped multicast is only worth reporting when nothing was there
        # to reinject the packets
        items.append(ProvenanceDataItem(
            self._add_name(names, "Dropped_Multicast_Packets"),
            str(router_diagnostic.n_dropped_multicast_packets),
            report=(
                router_diagnostic.n_dropped_multicast_packets > 0 and
                reinjector_status is None),
            message=(
                "The router on {}, {} has dropped {} multicast route packets. "
                "Try increasing the machine_time_step and/or the time scale "
                "factor or reducing the number of atoms per core."
                .format(x, y, router_diagnostic.n_dropped_multicast_packets))))
        add_count("Local_P2P_Packets",
                  router_diagnostic.n_local_peer_to_peer_packets)
        add_count("External_P2P_Packets",
                  router_diagnostic.n_external_peer_to_peer_packets)
        add_count("Dropped_P2P_Packets",
                  router_diagnostic.n_dropped_peer_to_peer_packets)
        add_count("Local_NN_Packets",
                  router_diagnostic.n_local_nearest_neighbour_packets)
        add_count("External_NN_Packets",
                  router_diagnostic.n_external_nearest_neighbour_packets)
        add_count("Dropped_NN_Packets",
                  router_diagnostic.n_dropped_nearest_neighbour_packets)
        add_count("Local_FR_Packets",
                  router_diagnostic.n_local_fixed_route_packets)
        add_count("External_FR_Packets",
                  router_diagnostic.n_external_fixed_route_packets)
        add_count("Dropped_FR_Packets",
                  router_diagnostic.n_dropped_fixed_route_packets)

        if reinjector_status is not None:
            # reinjector counts are stored as integers, unlike the router
            # counters above
            items.append(ProvenanceDataItem(
                self._add_name(names, "Received_For_Reinjection"),
                reinjector_status.n_dropped_packets))
            items.append(ProvenanceDataItem(
                self._add_name(names, "Missed_For_Reinjection"),
                reinjector_status.n_missed_dropped_packets,
                report=reinjector_status.n_missed_dropped_packets > 0,
                message=(
                    "The reinjector on {}, {} has missed {} packets.".format(
                        x, y, reinjector_status.n_missed_dropped_packets))))
            items.append(ProvenanceDataItem(
                self._add_name(names, "Reinjection_Overflows"),
                reinjector_status.n_dropped_packet_overflows,
                report=reinjector_status.n_dropped_packet_overflows > 0,
                message=(
                    "The reinjector on {}, {} has dropped {} packets.".format(
                        x, y, reinjector_status.n_dropped_packet_overflows))))
            items.append(ProvenanceDataItem(
                self._add_name(names, "Reinjected"),
                reinjector_status.n_reinjected_packets))

        return items