def get_provenance_data_from_machine(self, transceiver, placement):
        """ Get provenance from the machine

        :param transceiver: SpiNNMan interface to the machine
        :param placement: the location of this vertex on the machine
        """
        provenance_data = self._read_provenance_data(transceiver, placement)
        provenance_items = self._read_basic_provenance_items(
            provenance_data, placement)
        provenance_data = self._get_remaining_provenance_data_items(
            provenance_data)
        _, _, _, _, names = self._get_placement_details(placement)

        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "lost_packets_without_payload"),
                provenance_data[0],
                report=provenance_data[0] > 0,
                message=(
                    "The live packet gatherer has lost {} packets which do "
                    "not have payloads during its execution. Try increasing "
                    "the machine time step or increasing the time scale "
                    "factor. If you are running in real time, try reducing "
                    "the number of vertices which are feeding this live "
                    "packet gatherer".format(provenance_data[0]))))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "lost_packets_with_payload"),
                provenance_data[1],
                report=provenance_data[1] > 0,
                message=(
                    "The live packet gatherer has lost {} packets which have "
                    "payloads during its execution. Try increasing the "
                    "machine time step or increasing the time scale factor. "
                    "If you are running in real time, try reducing the "
                    "number of vertices which are feeding this live packet "
                    "gatherer".format(provenance_data[1]))))

        return provenance_items
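# Every snippet in this collection builds ProvenanceDataItem objects. As a
# minimal, hypothetical sketch (inferred from the call sites here, not the
# real spinn_front_end_common class), the implied constructor and
# attributes are:
class ProvenanceDataItem(object):
    def __init__(self, names, value, report=False, message=None):
        self.names = names      # hierarchy of strings naming the item
        self.value = value      # the recorded datum (usually int, or str)
        self.report = report    # whether to flag this item to the user
        self.message = message  # explanation shown when report is True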
    def extract_provenance(self, executor):
        """ Acquires the timings from PACMAN algorithms (provenance data)

        :param ~pacman.executor.PACMANAlgorithmExecutor executor:
            the PACMAN workflow executor
        :rtype: None
        """
        for (algorithm, run_time, exec_names) in executor.algorithm_timings:
            key = "run_time_of_{}".format(algorithm)
            if key not in self.__already_done:
                names = [self.TOP_NAME, exec_names, key]
                self.__data_items.append(ProvenanceDataItem(names, run_time))
                self.__already_done.add(key)
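    # Hedged usage sketch for extract_provenance above: it only needs an
    # object exposing algorithm_timings as (algorithm, run_time, exec_names)
    # triples, so a stub like this (hypothetical, illustration only) can
    # drive it:
    class _StubExecutor(object):
        algorithm_timings = [
            ("RouterCompressor", 1.25, "mapping"),
            ("RouterCompressor", 1.30, "mapping"),  # duplicate key: skipped
        ]
    # extractor.extract_provenance(_StubExecutor()) then records one item
    # under [TOP_NAME, "mapping", "run_time_of_RouterCompressor"], since
    # __already_done deduplicates repeated keys.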
    def _parse_neuron_provenance(self, label, names, provenance_data):
        """ Extract and yield neuron provenance

        :param str label: The label of the node
        :param list(str) names: The hierarchy of names for the provenance data
        :param list(int) provenance_data: A list of data items to interpret
        :return: a list of provenance data items
        :rtype: iterator of ProvenanceDataItem
        """
        neuron_prov = NeuronProvenance(*provenance_data)

        yield ProvenanceDataItem(
            names + ["Last_timer_tic_the_core_ran_to"],
            neuron_prov.current_timer_tick)
        yield self._app_vertex.get_tdma_provenance_item(
            names, label, neuron_prov.n_tdma_misses)
        yield ProvenanceDataItem(
            names + ["Earliest_send_time"], neuron_prov.earliest_send)
        yield ProvenanceDataItem(
            names + ["Latest_Send_time"], neuron_prov.latest_send)

        return NeuronProvenance.N_ITEMS
    def _parse_synapse_provenance(self, label, names, provenance_data):
        """ Extract and yield synapse provenance

        :param str label: The label of the node
        :param list(str) names: The hierarchy of names for the provenance data
        :param list(int) provenance_data: A list of data items to interpret
        :return: a list of provenance data items
        :rtype: iterator of ProvenanceDataItem
        """
        synapse_prov = SynapseProvenance(*provenance_data)

        yield ProvenanceDataItem(
            names + [self.TOTAL_PRE_SYNAPTIC_EVENT_NAME],
            synapse_prov.n_pre_synaptic_events)
        yield ProvenanceDataItem(
            names + [self.SATURATION_COUNT_NAME],
            synapse_prov.n_saturations, synapse_prov.n_saturations > 0,
            f"The weights from the synapses for {label} saturated "
            f"{synapse_prov.n_saturations} times. If this causes issues you "
            "can increase the spikes_per_second and / or ring_buffer_sigma "
            "values located within the .spynnaker.cfg file.")
        yield ProvenanceDataItem(
            names + [self.SATURATED_PLASTIC_WEIGHTS_NAME],
            synapse_prov.n_plastic_saturations,
            synapse_prov.n_plastic_saturations > 0,
            f"The weights from the plastic synapses for {label} saturated "
            f"{synapse_prov.n_plastic_saturations} times. If this causes "
            "issues increase the spikes_per_second and / or ring_buffer_sigma"
            " values located within the .spynnaker.cfg file.")
        yield ProvenanceDataItem(
            names + [self.GHOST_SEARCHES], synapse_prov.n_ghost_searches,
            synapse_prov.n_ghost_searches > 0,
            f"The number of failed population table searches for {label} was "
            f"{synapse_prov.n_ghost_searches}. If this number is large "
            "relative to the  predicted incoming spike rate, try increasing "
            " source and target neurons per core")
        yield ProvenanceDataItem(
            names + [self.BIT_FIELDS_NOT_READ],
            synapse_prov.n_failed_bitfield_reads, False,
            f"On {label}, the filter for stopping redundant DMAs couldn't be "
            f"fully filled in; it failed to read "
            f"{synapse_prov.n_failed_bitfield_reads} entries. "
            "Try reducing neurons per core.")
        yield ProvenanceDataItem(
            names + [self.INVALID_MASTER_POP_HITS],
            synapse_prov.n_invalid_pop_table_hits,
            synapse_prov.n_invalid_pop_table_hits > 0,
            f"On {label}, there were {synapse_prov.n_invalid_pop_table_hits} "
            "keys received that had no master pop entry for them. This is an "
            "error, which most likely stems from bad routing.")
        yield ProvenanceDataItem(
            names + [self.BIT_FIELD_FILTERED_PACKETS],
            synapse_prov.n_filtered_by_bitfield)
    def parse_extra_provenance_items(self, label, names, provenance_data):
        yield from self._parse_neuron_provenance(
            label, names, provenance_data[:NeuronProvenance.N_ITEMS])

        neuron_prov = NeuronMainProvenance(
            *provenance_data[-NeuronMainProvenance.N_ITEMS:])

        yield ProvenanceDataItem(
            names + ["Timer tick overruns"], neuron_prov.n_timer_overruns,
            neuron_prov.n_timer_overruns > 0,
            f"Vertex {label} overran on {neuron_prov.n_timer_overruns} "
            "timesteps. This may mean that the simulation results are invalid."
            " Try with fewer neurons per core, increasing the time"
            " scale factor, or reducing the number of spikes sent")
def _create_provenance():
    items = []
    items.append(ProvenanceDataItem(["core1", "value1"], 23))
    items.append(ProvenanceDataItem(["core1", "value2"], 45))
    items.append(ProvenanceDataItem(["core1", "value3"], 67))
    items.append(ProvenanceDataItem(["core2", "value1"], "bacon"))
    items.append(ProvenanceDataItem(["core2", "value2"], 23))
    items.append(ProvenanceDataItem(["core2", "value3"], 45))
    return items
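# Hedged usage sketch for the helper above, assuming the names and value
# attributes from the ProvenanceDataItem sketch earlier:
for item in _create_provenance():
    print("/".join(item.names), "=", item.value)   # e.g. core1/value1 = 23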
    def _check_bit_field_router_compressor_for_success(
            self, executable_targets, transceiver, host_chips,
            sorter_binary_path, prov_data_items):
        """ Goes through the cores checking for cores that have failed to\
            generate the compressed routing tables with bitfield

        :param ExecutableTargets executable_targets:
            cores to load router compressor with bitfield on
        :param ~.Transceiver transceiver: SpiNNMan instance
        :param list(tuple(int,int)) host_chips:
            the chips which need to be run on host.
        :param str sorter_binary_path: the path to the sorter binary
        :param list(ProvenanceDataItem) prov_data_items:
            the store of data items
        :rtype: bool
        """
        sorter_cores = executable_targets.get_cores_for_binary(
            sorter_binary_path)
        for core_subset in sorter_cores:
            x = core_subset.x
            y = core_subset.y

            # prov names
            names = list()
            names.append(PROV_TOP_NAME)
            names.append(PROV_CHIP_NAME.format(x, y))
            names.append(MERGED_NAME)

            for p in core_subset.processor_ids:

                # Read the result from USER1/USER2 registers
                user_1_base_address = \
                    transceiver.get_user_1_register_address_from_core(p)
                user_2_base_address = \
                    transceiver.get_user_2_register_address_from_core(p)
                result = struct.unpack(
                    "<I", transceiver.read_memory(
                        x, y, user_1_base_address, self._USER_BYTES))[0]
                total_bit_fields_merged = struct.unpack(
                    "<I", transceiver.read_memory(
                        x, y, user_2_base_address, self._USER_BYTES))[0]

                if result != self.SUCCESS:
                    if (x, y) not in host_chips:
                        host_chips.append((x, y))
                    return False
                prov_data_items.append(ProvenanceDataItem(
                    names, str(total_bit_fields_merged)))
        return True
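    # The "<I" format above decodes one little-endian unsigned 32-bit word,
    # so each user-register read yields a single int. A minimal
    # self-contained illustration of the same decode step:
    import struct
    _raw = b"\x2a\x00\x00\x00"            # bytes as read_memory would return
    _value, = struct.unpack("<I", _raw)   # _value == 42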
    def get_provenance_data(self):
        name = "{}_{}_{}".format(
            self._pre_population.label, self._post_population.label,
            self.__class__.__name__)
        return [ProvenanceDataItem(
            [name, "Times_synaptic_delays_got_clipped"],
            self._n_clipped_delays,
            report=self._n_clipped_delays > 0,
            message=(
                "The delays in the connector {} from {} to {} were clipped "
                "to {} a total of {} times.  This can be avoided by reducing "
                "the timestep or increasing the minimum delay to one "
                "timestep".format(
                    self.__class__.__name__, self._pre_population.label,
                    self._post_population.label, self._min_delay,
                    self._n_clipped_delays)))]
def get_lut_provenance(pre_population_label, post_population_label, rule_name,
                       entry_name, param_name, last_entry):
    # pylint: disable=too-many-arguments
    top_level_name = "{}_{}_STDP_{}".format(pre_population_label,
                                            post_population_label, rule_name)
    return ProvenanceDataItem(
        [top_level_name, entry_name],
        last_entry,
        report=last_entry > 0,
        message=(
            "The last entry in the STDP exponential lookup table for the {}"
            " parameter of the {} between {} and {} was {} rather than 0,"
            " indicating that the lookup table was not big enough at this"
            " timestep and value.  Try reducing the parameter value, or"
            " increasing the timestep".format(param_name, rule_name,
                                              pre_population_label,
                                              post_population_label,
                                              last_entry)))
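# Hedged example call (population labels, names and values are made up
# purely for illustration):
item = get_lut_provenance(
    "excitatory", "inhibitory", "SpikePairRule",
    "last_entry_in_tau_plus_lut", "tau_plus", last_entry=3)
# item.report is True because the last lookup-table entry (3) is non-zero.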
    def get_provenance_data(self, synapse_info):
        """
        :param SynapseInformation synapse_info:
        :rtype:
            iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem)
        """
        name = "connector_{}_{}_{}".format(
            synapse_info.pre_population.label,
            synapse_info.post_population.label,
            self.__class__.__name__)
        # Convert to a native Python integer; the provenance system expects one
        ncd = self.__n_clipped_delays.item()
        yield ProvenanceDataItem(
            [name, "Times_synaptic_delays_got_clipped"], ncd,
            report=(ncd > 0),
            message=self._CLIPPED_MSG.format(
                self.__class__.__name__, synapse_info.pre_population.label,
                synapse_info.post_population.label, self.__min_delay, ncd))
    def get_provenance_data_from_machine(self, transceiver, placement):
        # get prov data
        provenance_data = self._read_provenance_data(transceiver, placement)
        # get system level prov
        provenance_items = self._read_basic_provenance_items(
            provenance_data, placement)
        # get left over prov
        provenance_data = self._get_remaining_provenance_data_items(
            provenance_data)
        # stuff for making prov data items
        label, x, y, p, names = self._get_placement_details(placement)

        # get the only app level prov item
        n_buffer_overflows = provenance_data[0]

        # build it
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, self.INPUT_BUFFER_FULL_NAME),
            n_buffer_overflows, report=n_buffer_overflows > 0,
            message=self.INPUT_BUFFER_FULL_MESSAGE.format(
                label, x, y, p, n_buffer_overflows)))
        return provenance_items
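    # _add_name extends the name hierarchy for each item; from its call
    # sites it behaves like the `names + [extra]` pattern used in other
    # snippets here. A plausible sketch (an assumption, not the real helper):
    @staticmethod
    def _add_name(names, extra_name):
        # return a new list so the shared hierarchy is never mutated
        return list(names) + [extra_name]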
    def get_provenance_data(self, synapse_info):
        """
        :param SynapseInformation synapse_info:
        :rtype:
            list(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem)
        """
        name = "connector_{}_{}_{}".format(
            synapse_info.pre_population.label,
            synapse_info.post_population.label,
            self.__class__.__name__)
        return [
            ProvenanceDataItem(
                [name, "Times_synaptic_delays_got_clipped"],
                self.__n_clipped_delays,
                report=self.__n_clipped_delays > 0,
                message=(
                    "The delays in the connector {} from {} to {} were "
                    "clipped to {} a total of {} times.  This can be avoided "
                    "by reducing the timestep or increasing the minimum "
                    "delay to one timestep".format(
                        self.__class__.__name__,
                        synapse_info.pre_population.label,
                        synapse_info.post_population.label,
                        self.__min_delay, self.__n_clipped_delays)))
        ]
    def get_provenance_data_from_machine(self, transceiver, placement):
        # get data from basic prov
        provenance_data = self._read_provenance_data(transceiver, placement)
        provenance_items = self._read_basic_provenance_items(
            provenance_data, placement)
        provenance_data = self._get_remaining_provenance_data_items(
            provenance_data)

        # get item in data
        queue_overflows = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.QUEUE_OVERFLOWS.value]
        label, x, y, p, names = self._get_placement_details(placement)

        # translate into provenance data items
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "Time_queue_overflows"),
            queue_overflows,
            report=queue_overflows > 0,
            message=(
                "The queue of packets acquired by core {}:{}:{} running "
                "model {} overflowed on {} occasions. It is not known how "
                "to rectify this.".format(
                    x, y, p, self.get_binary_file_name(), queue_overflows))))
        return provenance_items
    def get_provenance_data_from_machine(self, transceiver, placement):
        provenance_data = self._read_provenance_data(transceiver, placement)
        provenance_items = self._read_basic_provenance_items(
            provenance_data, placement)
        provenance_data = self._get_remaining_provenance_data_items(
            provenance_data)

        n_saturations = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.SATURATION_COUNT.value]
        n_buffer_overflows = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.BUFFER_OVERFLOW_COUNT.value]
        n_pre_synaptic_events = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.PRE_SYNAPTIC_EVENT_COUNT.value]
        last_timer_tick = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.CURRENT_TIMER_TIC.value]
        n_plastic_saturations = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.
            PLASTIC_SYNAPTIC_WEIGHT_SATURATION_COUNT.value]

        label, x, y, p, names = self._get_placement_details(placement)

        # translate into provenance data items
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "Times_synaptic_weights_have_saturated"),
                n_saturations,
                report=n_saturations > 0,
                message=
                ("The weights from the synapses for {} on {}, {}, {} saturated "
                 "{} times. If this causes issues you can increase the "
                 "spikes_per_second and / or ring_buffer_sigma "
                 "values located within the .spynnaker.cfg file.".format(
                     label, x, y, p, n_saturations))))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "Times_the_input_buffer_lost_packets"),
                n_buffer_overflows,
                report=n_buffer_overflows > 0,
                message=
                ("The input buffer for {} on {}, {}, {} lost packets on {} "
                 "occasions. This is often a sign that the system is running "
                 "too quickly for the number of neurons per core.  Please "
                 "increase the timer_tic or time_scale_factor or decrease the "
                 "number of neurons per core.".format(label, x, y, p,
                                                      n_buffer_overflows))))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "Total_pre_synaptic_events"),
                n_pre_synaptic_events))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "Last_timer_tic_the_core_ran_to"),
                last_timer_tick))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(
                    names, "Times_plastic_synaptic_weights_have_saturated"),
                n_plastic_saturations,
                report=n_plastic_saturations > 0,
                message=(
                    "The weights from the plastic synapses for {} on {}, {}, "
                    "{} saturated {} times. If this causes issues, increase "
                    "the spikes_per_second and / or ring_buffer_sigma values "
                    "located within the .spynnaker.cfg file.".format(
                        label, x, y, p, n_plastic_saturations))))

        return provenance_items
    def __add_router_diagnostics(self, items, x, y, router_diagnostic,
                                 reinjection_status, expected, router_table):
        """ Stores router diagnostics as a set of provenance data items.

        :param list(ProvenanceDataItem) items: the list to append to
        :param int x: x coordinate of the router in question
        :param int y: y coordinate of the router in question
        :param ~.RouterDiagnostics router_diagnostic:
            the router diagnostic object
        :param ReInjectionStatus reinjection_status:
            the data gained from the extra monitor re-injection subsystem
        :param bool expected: whether this router was expected to be used
        :param ~.MulticastRoutingTable router_table:
            the router table generated by the PACMAN tools
        """
        # pylint: disable=too-many-arguments
        names = list()
        names.append("router_provenance")
        if expected:
            names.append("expected_routers")
        else:
            names.append("unexpected_routers")
        names.append("router_at_chip_{}_{}".format(x, y))

        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Local_Multicast_Packets"),
                str(router_diagnostic.n_local_multicast_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "External_Multicast_Packets"),
                str(router_diagnostic.n_external_multicast_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Dropped_Multicast_Packets"),
                str(router_diagnostic.n_dropped_multicast_packets),
                report=(router_diagnostic.n_dropped_multicast_packets > 0
                        and reinjection_status is None),
                message=(
                    "The router on {}, {} has dropped {} multicast packets. "
                    "Try increasing the machine_time_step and/or the time "
                    "scale factor or reducing the number of atoms per "
                    "core.".format(
                        x, y, router_diagnostic.n_dropped_multicast_packets))))
        items.append(
            ProvenanceDataItem(
                self.__add_name(
                    names, "Dropped_Multicast_Packets_via_local_transmission"),
                str(router_diagnostic.user_3),
                report=(router_diagnostic.user_3 > 0),
                message=(
                    "The router on {}, {} has dropped {} multicast packets that"
                    " were transmitted by local cores. This occurs where the "
                    "router has no entry associated with the multi-cast key. "
                    "Try investigating the keys allocated to the vertices and "
                    "the router table entries for this chip.".format(
                        x, y, router_diagnostic.user_3))))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names,
                                "default_routed_external_multicast_packets"),
                str(router_diagnostic.user_2),
                report=(router_diagnostic.user_2 > 0 and
                        ((router_table is not None
                          and router_table.number_of_defaultable_entries == 0)
                         or router_table is None)),
                message=(
                    "The router on {}, {} has default routed {} multicast "
                    "packets, but the router table did not expect any "
                    "default routed packets. This occurs where the router "
                    "has no entry associated with the multicast key. Try "
                    "investigating the keys allocated to the vertices and "
                    "the router table entries for this chip.".format(
                        x, y, router_diagnostic.user_2))))

        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Local_P2P_Packets"),
                str(router_diagnostic.n_local_peer_to_peer_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "External_P2P_Packets"),
                str(router_diagnostic.n_external_peer_to_peer_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Dropped_P2P_Packets"),
                str(router_diagnostic.n_dropped_peer_to_peer_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Local_NN_Packets"),
                str(router_diagnostic.n_local_nearest_neighbour_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "External_NN_Packets"),
                str(router_diagnostic.n_external_nearest_neighbour_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Dropped_NN_Packets"),
                str(router_diagnostic.n_dropped_nearest_neighbour_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Local_FR_Packets"),
                str(router_diagnostic.n_local_fixed_route_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "External_FR_Packets"),
                str(router_diagnostic.n_external_fixed_route_packets)))
        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Dropped_FR_Packets"),
                str(router_diagnostic.n_dropped_fixed_route_packets),
                report=router_diagnostic.n_dropped_fixed_route_packets > 0,
                message=(
                    "The router on chip {}:{} dropped {} fixed route "
                    "packets. This is indicative of an error within the "
                    "data extraction process as this is the only expected "
                    "user of fixed route packets.".format(
                        x, y,
                        router_diagnostic.n_dropped_fixed_route_packets))))
        if reinjection_status is not None:
            items.append(
                ProvenanceDataItem(
                    self.__add_name(names, "Received_For_Reinjection"),
                    reinjection_status.n_dropped_packets))
            items.append(
                ProvenanceDataItem(
                    self.__add_name(names, "Missed_For_Reinjection"),
                    reinjection_status.n_missed_dropped_packets,
                    report=reinjection_status.n_missed_dropped_packets > 0,
                    message=(
                        "The extra monitor on {}, {} has missed {} "
                        "packets.".format(
                            x, y,
                            reinjection_status.n_missed_dropped_packets))))
            items.append(
                ProvenanceDataItem(
                    self.__add_name(names, "Reinjection_Overflows"),
                    reinjection_status.n_dropped_packet_overflows,
                    report=reinjection_status.n_dropped_packet_overflows > 0,
                    message=(
                        "The extra monitor on {}, {} has dropped {} "
                        "packets.".format(
                            x, y,
                            reinjection_status.n_dropped_packet_overflows))))
            items.append(
                ProvenanceDataItem(self.__add_name(names, "Reinjected"),
                                   reinjection_status.n_reinjected_packets))
            items.append(
                ProvenanceDataItem(
                    self.__add_name(names, "Dumped_from_a_Link"),
                    str(reinjection_status.n_link_dumps),
                    report=(reinjection_status.n_link_dumps > 0
                            and self._has_virtual_chip_connected(
                                self._machine, x, y)),
                    message=(
                        "The extra monitor on {}, {} has detected that {} "
                        "packets were dumped from an outgoing link of this "
                        "chip's router. This often occurs when external "
                        "devices are used in the script but not connected "
                        "to the communication fabric correctly. These "
                        "packets may have been reinjected multiple times "
                        "and so this number may be an overestimate.".format(
                            x, y, reinjection_status.n_link_dumps))))
            items.append(
                ProvenanceDataItem(
                    self.__add_name(names, "Dumped_from_a_processor"),
                    str(reinjection_status.n_processor_dumps),
                    report=reinjection_status.n_processor_dumps > 0,
                    message=(
                        "The extra monitor on {}, {} has detected that {} "
                        "packets were dumped from a core failing to take "
                        "the packet. This often occurs when the executable "
                        "has crashed or has not been given a multicast "
                        "packet callback. It can also result from the core "
                        "taking too long to process each packet. These "
                        "packets were reinjected and so this number is "
                        "likely an overestimate.".format(
                            x, y, reinjection_status.n_processor_dumps))))

        items.append(
            ProvenanceDataItem(
                self.__add_name(names, "Error status"),
                str(router_diagnostic.error_status),
                report=router_diagnostic.error_status > 0,
                message=
                ("The router on {}, {} has a non-zero error status.  This could"
                 " indicate a hardware fault.  The errors set are {}, and the"
                 " error count is {}".format(x, y,
                                             router_diagnostic.errors_set,
                                             router_diagnostic.error_count))))
    def _parse_spike_processing_fast_provenance(
            self, label, names, provenance_data):
        """ Extract and yield spike processing provenance

        :param str label: The label of the node
        :param list(str) names: The hierarchy of names for the provenance data
        :param list(int) provenance_data: A list of data items to interpret
        :return: a list of provenance data items
        :rtype: iterator of ProvenanceDataItem
        """
        prov = SpikeProcessingFastProvenance(*provenance_data)

        yield ProvenanceDataItem(
            names + [self.INPUT_BUFFER_FULL_NAME],
            prov.n_buffer_overflows,
            prov.n_buffer_overflows > 0,
            f"The input buffer for {label} lost packets on "
            f"{prov.n_buffer_overflows} occasions. This is often a "
            "sign that the system is running too quickly for the number of "
            "neurons per core.  Please increase the timer_tic or"
            " time_scale_factor or decrease the number of neurons per core.")
        yield ProvenanceDataItem(
            names + [self.DMA_COMPLETE], prov.n_dmas_complete)
        yield ProvenanceDataItem(
            names + [self.SPIKES_PROCESSED],
            prov.n_spikes_processed)
        yield ProvenanceDataItem(
            names + [self.N_REWIRES_NAME], prov.n_rewires)

        late_message = (
            f"On {label}, {prov.n_late_packets} packets were dropped "
            "from the input buffer, because they arrived too late to be "
            "processed in a given time step. Try increasing the "
            "time_scale_factor located within the .spynnaker.cfg file or in "
            "the pynn.setup() method."
            if self._app_vertex.drop_late_spikes else
            f"On {label}, {prov.n_late_packets} packets arrived too "
            "late to be processed in a given time step. Try increasing the "
            "time_scale_factor located within the .spynnaker.cfg file or in "
            "the pynn.setup() method.")
        yield ProvenanceDataItem(
            names + [self.N_LATE_SPIKES_NAME], prov.n_late_packets,
            prov.n_late_packets > 0, late_message)

        yield ProvenanceDataItem(
            names + [self.MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME],
            prov.max_size_input_buffer, report=False)
        yield ProvenanceDataItem(
            names + [self.MAX_SPIKES_RECEIVED], prov.max_spikes_received)
        yield ProvenanceDataItem(
            names + [self.MAX_SPIKES_PROCESSED], prov.max_spikes_processed)
        yield ProvenanceDataItem(
            names + [self.N_TRANSFER_TIMER_OVERRUNS],
            prov.n_transfer_timer_overruns, prov.n_transfer_timer_overruns > 0,
            f"On {label}, the transfer of synaptic inputs to SDRAM did not end"
            " before the next timer tick started"
            f" {prov.n_transfer_timer_overruns} times with a maximum overrun"
            f" of {prov.max_transfer_timer_overrun}.  Try increasing "
            " transfer_overhead_clocks in your .spynnaker.cfg file.")
        yield ProvenanceDataItem(
            names + [self.N_SKIPPED_TIME_STEPS], prov.n_skipped_time_steps,
            prov.n_skipped_time_steps > 0,
            f"On {label}, synaptic processing did not start on"
            f" {prov.n_skipped_time_steps} time steps.  Try increasing the "
            "time_scale_factor located within the .spynnaker.cfg file or in "
            "the pynn.setup() method.")
        yield ProvenanceDataItem(
            names + [self.MAX_TRANSFER_TIMER_OVERRUNS],
            prov.max_transfer_timer_overrun)
    def _read_basic_provenance_items(self, provenance_data, placement):
        transmission_event_overflow = provenance_data[
            self.PROVENANCE_DATA_ENTRIES.TRANSMISSION_EVENT_OVERFLOW.value]
        callback_queue_overloaded = provenance_data[
            self.PROVENANCE_DATA_ENTRIES.CALLBACK_QUEUE_OVERLOADED.value]
        dma_queue_overloaded = provenance_data[
            self.PROVENANCE_DATA_ENTRIES.DMA_QUEUE_OVERLOADED.value]
        number_of_times_timer_tic_over_ran = provenance_data[
            self.PROVENANCE_DATA_ENTRIES.TIMER_TIC_HAS_OVERRUN.value]
        max_number_of_times_timer_tic_over_ran = provenance_data[
            self.PROVENANCE_DATA_ENTRIES.MAX_NUMBER_OF_TIMER_TIC_OVERRUN.value]

        # create provenance data items for returning
        label, x, y, p, names = self._get_placement_details(placement)
        data_items = list()
        data_items.append(
            ProvenanceDataItem(
                self._add_name(names,
                               "Times_the_transmission_of_spikes_overran"),
                transmission_event_overflow,
                report=transmission_event_overflow != 0,
                message=(
                    "The transmission buffer for {} on {}, {}, {} was blocked "
                    "on {} occasions. This is often a sign that the system is "
                    "experiencing back pressure from the communication fabric. "
                    "Please either: "
                    "1. spread the load over more cores, "
                    "2. reduce your peak transmission load,"
                    "3. adjust your mapping algorithm.".format(
                        label, x, y, p, transmission_event_overflow))))

        data_items.append(
            ProvenanceDataItem(
                self._add_name(names,
                               "Times_the_callback_queue_was_overloaded"),
                callback_queue_overloaded,
                report=callback_queue_overloaded != 0,
                message=
                ("The callback queue for {} on {}, {}, {} overloaded on {} "
                 "occasions. This is often a sign that the system is running "
                 "too quickly for the number of neurons per core.  Please "
                 "increase the machine time step or time_scale_factor or "
                 "decrease the number of neurons per core.".format(
                     label, x, y, p, callback_queue_overloaded))))

        data_items.append(
            ProvenanceDataItem(
                self._add_name(names, "Times_the_dma_queue_was_overloaded"),
                dma_queue_overloaded,
                report=dma_queue_overloaded != 0,
                message=
                ("The DMA queue for {} on {}, {}, {} overloaded on {} "
                 "occasions. This is often a sign that the system is running "
                 "too quickly for the number of neurons per core.  Please "
                 "increase the machine time step or time_scale_factor or "
                 "decrease the number of neurons per core.".format(
                     label, x, y, p, dma_queue_overloaded))))

        data_items.append(
            ProvenanceDataItem(
                self._add_name(names, "Times_the_timer_tic_over_ran"),
                number_of_times_timer_tic_over_ran,
                report=number_of_times_timer_tic_over_ran > 4,
                message=(
                    "A timer tick callback for {} on {}, {}, {} was still "
                    "executing when the next timer tick callback fired; "
                    "this happened {} times. This is a sign of the system "
                    "being overloaded and therefore the results are likely "
                    "incorrect.  Please increase the machine time step or "
                    "time_scale_factor or decrease the number of neurons "
                    "per core".format(
                        label, x, y, p, number_of_times_timer_tic_over_ran))))

        data_items.append(
            ProvenanceDataItem(
                self._add_name(names,
                               "max_number_of_times_timer_tic_over_ran"),
                max_number_of_times_timer_tic_over_ran,
                report=max_number_of_times_timer_tic_over_ran > 4,
                message=
                ("The timer for {} on {}, {}, {} fell behind by up to {} "
                 "ticks. This is a sign of the system being overloaded and "
                 "therefore the results are likely incorrect. Please increase "
                 "the machine time step or time_scale_factor or decrease the "
                 "number of neurons per core".format(
                     label, x, y, p, max_number_of_times_timer_tic_over_ran))))

        return data_items
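    # PROVENANCE_DATA_ENTRIES is read via .value, which matches an Enum of
    # word offsets into the raw provenance block. A hedged sketch (member
    # order is assumed for illustration; the real enum is defined on the
    # vertex class and may differ):
    from enum import Enum

    class PROVENANCE_DATA_ENTRIES(Enum):
        TRANSMISSION_EVENT_OVERFLOW = 0
        CALLBACK_QUEUE_OVERLOADED = 1
        DMA_QUEUE_OVERLOADED = 2
        TIMER_TIC_HAS_OVERRUN = 3
        MAX_NUMBER_OF_TIMER_TIC_OVERRUN = 4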
    def get_provenance_data_from_machine(self, transceiver, placement):
        provenance_data = self._read_provenance_data(transceiver, placement)
        provenance_items = self._read_basic_provenance_items(
            provenance_data, placement)
        provenance_data = self._get_remaining_provenance_data_items(
            provenance_data)

        times_timer_tic_overran = 0
        for item in provenance_items:
            if item.names[-1] == self._TIMER_TICK_OVERRUN:
                times_timer_tic_overran = item.value

        n_saturations = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.SATURATION_COUNT.value]
        n_buffer_overflows = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.BUFFER_OVERFLOW_COUNT.value]
        n_pre_synaptic_events = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.PRE_SYNAPTIC_EVENT_COUNT.value]
        last_timer_tick = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.CURRENT_TIMER_TIC.value]
        n_plastic_saturations = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.
            PLASTIC_SYNAPTIC_WEIGHT_SATURATION_COUNT.value]
        n_ghost_searches = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.GHOST_POP_TABLE_SEARCHES.value]
        failed_to_read_bit_fields = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.FAILED_TO_READ_BIT_FIELDS.value]
        dma_completes = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.DMA_COMPLETES.value]
        spike_processing_count = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.SPIKE_PROGRESSING_COUNT.value]
        invalid_master_pop_hits = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.INVALID_MASTER_POP_HITS.value]
        n_packets_filtered_by_bit_field_filter = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.BIT_FIELD_FILTERED_COUNT.value]
        n_rewires = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.N_REWIRES.value]
        n_late_packets = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.N_LATE_SPIKES.value]
        input_buffer_max_filled_size = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.INPUT_BUFFER_FILLED_SIZE.value]
        tdma_misses = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.TDMA_MISSES.value]

        label, x, y, p, names = self._get_placement_details(placement)

        # translate into provenance data items
        provenance_items.append(
            ProvenanceDataItem(self._add_name(names,
                                              self.SATURATION_COUNT_NAME),
                               n_saturations,
                               report=n_saturations > 0,
                               message=self.SATURATION_COUNT_MESSAGE.format(
                                   label, x, y, p, n_saturations)))
        provenance_items.append(
            ProvenanceDataItem(self._add_name(names,
                                              self.INPUT_BUFFER_FULL_NAME),
                               n_buffer_overflows,
                               report=n_buffer_overflows > 0,
                               message=self.INPUT_BUFFER_FULL_MESSAGE.format(
                                   label, x, y, p, n_buffer_overflows)))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, self.TOTAL_PRE_SYNAPTIC_EVENT_NAME),
                n_pre_synaptic_events))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, self.LAST_TIMER_TICK_NAME),
                last_timer_tick))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, self.SATURATED_PLASTIC_WEIGHTS_NAME),
                n_plastic_saturations,
                report=n_plastic_saturations > 0,
                message=self.SATURATED_PLASTIC_WEIGHTS_MESSAGE.format(
                    label, x, y, p, n_plastic_saturations)))
        provenance_items.append(
            ProvenanceDataItem(self._add_name(names, self.N_RE_WIRES_NAME),
                               n_rewires))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, self.GHOST_SEARCHES),
                n_ghost_searches,
                report=n_ghost_searches > 0,
                message=
                ("The number of failed population table searches for {} on {},"
                 " {}, {} was {}. If this number is large relative to the "
                 "predicted incoming spike rate, try increasing source and "
                 "target neurons per core".format(label, x, y, p,
                                                  n_ghost_searches))))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, self.BIT_FIELDS_NOT_READ),
                failed_to_read_bit_fields,
                report=False,
                message=(
                    "The filter for stopping redundant DMAs couldn't be "
                    "fully filled in; it failed to read {} entries, which "
                    "means it required a maximum of {} extra bytes of DTCM "
                    "(assuming cores have at most 255 neurons). Try reducing "
                    "neurons per core, or size of buffers, or neuron params "
                    "per neuron etc.".format(
                        failed_to_read_bit_fields,
                        failed_to_read_bit_fields *
                        self.WORDS_TO_COVER_256_ATOMS))))
        provenance_items.append(
            ProvenanceDataItem(self._add_name(names, self.DMA_COMPLETE),
                               dma_completes))
        provenance_items.append(
            ProvenanceDataItem(self._add_name(names, self.SPIKES_PROCESSED),
                               spike_processing_count))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, self.INVALID_MASTER_POP_HITS),
                invalid_master_pop_hits,
                report=invalid_master_pop_hits > 0,
                message=(
                    "There were {} keys received by core {}:{}:{} which had "
                    "no master pop entry for them. This is an error, which "
                    "most likely stems from bad routing.".format(
                        invalid_master_pop_hits, x, y, p))))
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, self.BIT_FIELD_FILTERED_PACKETS),
            n_packets_filtered_by_bit_field_filter,
            report=(n_packets_filtered_by_bit_field_filter > 0 and
                    (n_buffer_overflows > 0 or times_timer_tic_overran > 0)),
            message=(
                "There were {} packets received by {}:{}:{} that were "
                "filtered by the bitfield filter on the core. These packets "
                "had to be stored and processed on core, which means the "
                "core may not be running as efficiently as it could. Please "
                "adjust the network or the mapping so that these packets are "
                "filtered in the router to improve performance.".format(
                    n_packets_filtered_by_bit_field_filter, x, y, p))))
        late_message = (self._N_LATE_SPIKES_MESSAGE_DROP
                        if self.__drop_late_spikes else
                        self._N_LATE_SPIKES_MESSAGE_NO_DROP)
        provenance_items.append(
            ProvenanceDataItem(self._add_name(names, self._N_LATE_SPIKES_NAME),
                               n_late_packets,
                               report=n_late_packets > 0,
                               message=late_message.format(
                                   n_late_packets, label, x, y, p)))

        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(
                    names, self._MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME),
                input_buffer_max_filled_size, report=False))

        provenance_items.append(
            self._app_vertex.get_tdma_provenance_item(names, x, y, p,
                                                      tdma_misses))
        return provenance_items
    def parse_extra_provenance_items(self, label, names, provenance_data):
        (n_received, n_processed, n_added, n_sent, n_overflows, n_delays,
         n_tdma_behind, n_sat, n_bad_neuron, n_bad_keys, n_late_spikes, max_bg,
         n_bg_overloads) = provenance_data

        # translate into provenance data items
        yield ProvenanceDataItem(
            names + [self.COUNT_SATURATION_NAME], n_sat, (n_sat != 0),
            f"The delay extension {label} has dropped {n_sat} packets because "
            "during certain time steps a neuron was asked to spike more than "
            "256 times. This causes a saturation on the count tracker which "
            "is a uint8. Reduce the packet rates, or modify the delay "
            "extension to have larger counters.")
        yield ProvenanceDataItem(
            names + [self.INVALID_NEURON_ID_COUNT_NAME], n_bad_neuron,
            (n_bad_neuron != 0),
            f"The delay extension {label} has dropped {n_bad_neuron} packets "
            "because their neuron id was not valid. This is likely a routing "
            "issue. Please fix and try again")
        yield ProvenanceDataItem(
            names + [self.INVALID_KEY_COUNT_NAME], n_bad_keys,
            (n_bad_keys != 0),
            f"The delay extension {label} has dropped {n_bad_keys} packets "
            "due to the packet key being invalid. This is likely a routing "
            "issue. Please fix and try again")
        yield ProvenanceDataItem(names + [self.N_PACKETS_RECEIVED_NAME],
                                 n_received)
        yield ProvenanceDataItem(
            names + [self.N_PACKETS_PROCESSED_NAME], n_processed,
            (n_received != n_processed),
            f"The delay extension {label} only processed {n_processed} of "
            f"{n_received} received packets.  This could indicate a fault.")
        yield ProvenanceDataItem(
            names + [self.MISMATCH_ADDED_FROM_PROCESSED_NAME], n_added,
            (n_added != n_processed),
            f"The delay extension {label} only added {n_added} of "
            f"{n_processed} processed packets.  This could indicate a "
            "routing or filtering fault")
        yield ProvenanceDataItem(names + [self.N_PACKETS_SENT_NAME], n_sent)
        yield ProvenanceDataItem(
            names + [self.INPUT_BUFFER_LOST_NAME], n_overflows,
            (n_overflows > 0),
            f"The input buffer for {label} lost packets on {n_overflows} "
            "occasions. This is often a sign that the system is running "
            "too quickly for the number of neurons per core.  Please "
            "increase the timer_tic or time_scale_factor or decrease the "
            "number of neurons per core.")
        yield ProvenanceDataItem(names + [self.DELAYED_FOR_TRAFFIC_NAME],
                                 n_delays)
        yield self._app_vertex.get_tdma_provenance_item(
            names, label, n_tdma_behind)

        late_message = (
            f"On {label}, {n_late_spikes} packets were dropped from the "
            "input buffer, because they arrived too late to be processed in "
            "a given time step. Try increasing the time_scale_factor located "
            "within the .spynnaker.cfg file or in the pynn.setup() method."
            if self._app_vertex.drop_late_spikes else
            f"On {label}, {n_late_spikes} packets arrived too late to be "
            "processed in a given time step. Try increasing the "
            "time_scale_factor located within the .spynnaker.cfg file or in "
            "the pynn.setup() method.")
        yield ProvenanceDataItem(names + [self.N_LATE_SPIKES_NAME],
                                 n_late_spikes,
                                 report=(n_late_spikes > 0),
                                 message=late_message)

        yield ProvenanceDataItem(
            names + [self.BACKGROUND_MAX_QUEUED_NAME], max_bg, (max_bg > 1),
            f"On {label}, a maximum of {max_bg} background tasks were queued. "
            "Try increasing the time_scale_factor located within the "
            ".spynnaker.cfg file or in the pynn.setup() method.")
        yield ProvenanceDataItem(
            names + [self.BACKGROUND_OVERLOADS_NAME], n_bg_overloads,
            (n_bg_overloads > 0),
            f"On {label}, the background queue overloaded {n_bg_overloads} "
            "times. Try increasing the time_scale_factor located within the "
            ".spynnaker.cfg file or in the pynn.setup() method.")
    def get_provenance_data_from_machine(self, transceiver, placement):
        # pylint: disable=too-many-locals
        provenance_data = self._read_provenance_data(transceiver, placement)
        provenance_items = self._read_basic_provenance_items(
            provenance_data, placement)
        provenance_data = self._get_remaining_provenance_data_items(
            provenance_data)

        n_packets_received = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_RECEIVED.value]
        n_packets_processed = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_PROCESSED.value]
        n_packets_added = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_ADDED.value]
        n_packets_sent = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.N_PACKETS_SENT.value]
        n_buffer_overflows = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.N_BUFFER_OVERFLOWS.value]
        n_delays = provenance_data[
            self.EXTRA_PROVENANCE_DATA_ENTRIES.N_DELAYS.value]

        label, x, y, p, names = self._get_placement_details(placement)

        # translate into provenance data items
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_packets_received"),
            n_packets_received))
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_packets_processed"),
            n_packets_processed,
            report=n_packets_received != n_packets_processed,
            message=(
                "The delay extension {} on {}, {}, {} only processed {} of {}"
                " received packets.  This could indicate a fault.".format(
                    label, x, y, p, n_packets_processed, n_packets_received))))
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_packets_added_to_delay_slot"),
            n_packets_added,
            report=n_packets_added != n_packets_processed,
            message=(
                "The delay extension {} on {}, {}, {} only added {} of {}"
                " processed packets.  This could indicate a routing or"
                " filtering fault".format(
                    label, x, y, p, n_packets_added, n_packets_processed))))
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_packets_sent"),
            n_packets_sent))
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "Times_the_input_buffer_lost_packets"),
            n_buffer_overflows,
            report=n_buffer_overflows > 0,
            message=(
                "The input buffer for {} on {}, {}, {} lost packets on {} "
                "occasions. This is often a sign that the system is running "
                "too quickly for the number of neurons per core.  Please "
                "increase the timer_tic or time_scale_factor or decrease the "
                "number of neurons per core.".format(
                    label, x, y, p, n_buffer_overflows))))
        provenance_items.append(ProvenanceDataItem(
            self._add_name(names, "Number_of_times_delayed_to_spread_traffic"),
            n_delays))
        return provenance_items
    def get_provenance_data_from_machine(self, transceiver, placement):
        provenance_data = self._read_provenance_data(transceiver, placement)
        provenance_items = self._read_basic_provenance_items(
            provenance_data, placement)
        provenance_data = self._get_remaining_provenance_data_items(
            provenance_data)
        _, _, _, _, names = self._get_placement_details(placement)

        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "received_sdp_packets"),
                provenance_data[
                    self._PROVENANCE_ITEMS.N_RECEIVED_PACKETS.value],
                report=(provenance_data[
                    self._PROVENANCE_ITEMS.N_RECEIVED_PACKETS.value] == 0
                        and self._send_buffer_times is None),
                message=
                ("No SDP packets were received by {}.  If you expected packets"
                 " to be injected, this could indicate an error".format(
                     self._label))))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "send_multicast_packets"),
                provenance_data[self._PROVENANCE_ITEMS.N_SENT_PACKETS.value],
                report=provenance_data[
                    self._PROVENANCE_ITEMS.N_SENT_PACKETS.value] == 0,
                message=(
                    "No multicast packets were sent by {}.  If you expected"
                    " packets to be sent this could indicate an error".format(
                        self._label))))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "incorrect_keys"),
                provenance_data[self._PROVENANCE_ITEMS.INCORRECT_KEYS.value],
                report=provenance_data[
                    self._PROVENANCE_ITEMS.INCORRECT_KEYS.value] > 0,
                message=(
                    "Keys were received by {} that did not match the key {} and"
                    " mask {}".format(self._label, self._virtual_key,
                                      self._mask))))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "incorrect_packets"),
                provenance_data[
                    self._PROVENANCE_ITEMS.INCORRECT_PACKETS.value],
                report=provenance_data[
                    self._PROVENANCE_ITEMS.INCORRECT_PACKETS.value] > 0,
                message=(
                    "SDP packets were received by {} that were not "
                    "correct".format(self._label))))
        provenance_items.append(
            ProvenanceDataItem(
                self._add_name(names, "late_packets"),
                provenance_data[self._PROVENANCE_ITEMS.LATE_PACKETS.value],
                report=provenance_data[
                    self._PROVENANCE_ITEMS.LATE_PACKETS.value] > 0,
                message=(
                    "SDP packets were received by {} that were too late to "
                    "be transmitted in the simulation".format(self._label))))

        return provenance_items
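# Hedged end-to-end sketch: whichever snippet produced the items, reporting
# follows the same pattern, surfacing only the entries flagged for report
# (attribute names as in the ProvenanceDataItem sketch at the top):
def report_provenance(provenance_items, log=print):
    for item in provenance_items:
        if item.report:
            log("WARNING {}: {}".format("/".join(item.names), item.message))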