Example #1
    def _check_delay_values(self, app_edge, synapse_infos):
        """ checks the delay required from the user defined max, the max delay\
            supported by the post vertex splitter and the delay Extensions.

        :param ApplicationEdge app_edge: the undelayed app edge
        :param iterable[SynapseInfo] synapse_infos: iterable of synapse infos
        :return: tuple(n_delay_stages, delay_steps_per_stage, extension_needed)
        """

        # get max delay required
        max_delay_needed_ms = max(
            synapse_info.synapse_dynamics.get_delay_maximum(
                synapse_info.connector, synapse_info)
            for synapse_info in synapse_infos)

        # get if the post vertex needs a delay extension
        post_splitter = app_edge.post_vertex.splitter
        if not isinstance(post_splitter, AbstractSpynnakerSplitterDelay):
            raise DelayExtensionException(
                self.INVALID_SPLITTER_FOR_DELAYS_ERROR_MSG.format(
                    app_edge.post_vertex, post_splitter, app_edge))

        max_delay_steps = app_edge.post_vertex.splitter.max_support_delay()
        max_delay_ms = max_delay_steps * machine_time_step_ms()

        # if a delay extension is not needed, return early
        if max_delay_ms >= max_delay_needed_ms:
            return 0, max_delay_steps, False

        # Check post vertex is ok with getting a delay
        if not post_splitter.accepts_edges_from_delay_vertex():
            raise DelayExtensionException(
                self.DELAYS_NOT_SUPPORTED_SPLITTER.format(
                    app_edge.post_vertex, post_splitter, app_edge))

        # needs a delay extension; check that it can be supported with a
        # single delay extension, as we do not support more than one at present
        ext_provided_ms = (
            DelayExtensionVertex.get_max_delay_ticks_supported(max_delay_steps)
            * machine_time_step_ms())
        total_delay_ms = ext_provided_ms + max_delay_ms
        if total_delay_ms < max_delay_needed_ms:
            raise DelayExtensionException(
                self.NOT_SUPPORTED_DELAY_ERROR_MSG.format(
                    max_delay_needed_ms, app_edge,
                    app_edge.post_vertex.splitter, max_delay_ms,
                    ext_provided_ms))

        # return data for building delay extensions
        n_stages = int(math.ceil(max_delay_needed_ms / max_delay_ms)) - 1
        return n_stages, max_delay_steps, True
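
To make the stage arithmetic at the end of _check_delay_values concrete, here is a minimal standalone sketch using assumed illustrative numbers (a 1 ms machine timestep, a post splitter that supports 16 delay steps, and a required maximum delay of 50 ms); it is not taken from a real run:

import math

machine_time_step = 1.0      # assumed ms per timestep
max_delay_steps = 16         # assumed delay steps supported by the post splitter
max_delay_needed_ms = 50.0   # assumed largest delay requested by any synapse info

max_delay_ms = max_delay_steps * machine_time_step   # 16.0 ms handled on core
n_stages = int(math.ceil(max_delay_needed_ms / max_delay_ms)) - 1
print(n_stages)   # -> 3 extra stages provided by the delay extension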
Example #2
    def set_projection_information(self, synapse_info):
        """ Sets a connector's projection information.

        :param SynapseInformation synapse_info: the synapse info
        """
        # pylint: disable=unused-argument
        self._rng = (self._rng or NumpyRNG())
        self.__min_delay = machine_time_step_ms()
Example #3
    def _process_spike_data(vertex_slice, n_words, raw_data, spike_ids,
                            spike_times):
        """
        :param ~pacman.model.graphs.common.Slice vertex_slice:
        :param int n_words:
        :param bytearray raw_data:
        :param list(~numpy.ndarray) spike_ids:
        :param list(~numpy.ndarray) spike_times:
        """
        # pylint: disable=too-many-arguments
        n_bytes_per_block = n_words * BYTES_PER_WORD
        offset = 0
        while offset < len(raw_data):
            time, n_blocks = _TWO_WORDS.unpack_from(raw_data, offset)
            offset += _TWO_WORDS.size
            spike_data = numpy.frombuffer(raw_data,
                                          dtype="uint8",
                                          count=n_bytes_per_block * n_blocks,
                                          offset=offset)
            offset += n_bytes_per_block * n_blocks

            spikes = spike_data.view("<i4").byteswap().view("uint8")
            bits = numpy.fliplr(numpy.unpackbits(spikes).reshape(
                (-1, 32))).reshape((-1, n_bytes_per_block * 8))
            indices = numpy.nonzero(bits)[1]
            times = numpy.repeat([time * machine_time_step_ms()], len(indices))
            indices = indices + vertex_slice.lo_atom
            spike_ids.append(indices)
            spike_times.append(times)
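
The byteswap/unpackbits/fliplr sequence above turns each 32-bit spike bitfield word into per-neuron indices. A rough standalone sketch of that step, using a single hand-made word (the values are invented for illustration):

import numpy

# one little-endian 32-bit word with bits 0 and 5 set (invented test data)
raw_word = numpy.array([0b100001], dtype="<u4").tobytes()
spike_data = numpy.frombuffer(raw_word, dtype="uint8")

spikes = spike_data.view("<i4").byteswap().view("uint8")
bits = numpy.fliplr(numpy.unpackbits(spikes).reshape((-1, 32)))
print(numpy.nonzero(bits)[1])   # -> [0 5], the local neuron indices that spiked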
Example #4
    def _process_spike_data(vertex_slice, spike_data, base_key, results):
        """
        :param ~pacman.model.graphs.common.Slice vertex_slice:
        :param bytearray spike_data:
        :param int base_key:
        :param list(~numpy.ndarray) results:
        """
        number_of_bytes_written = len(spike_data)
        offset = 0
        while offset < number_of_bytes_written:
            length, time = _TWO_WORDS.unpack_from(spike_data, offset)
            time *= machine_time_step_ms()
            data_offset = offset + 2 * BYTES_PER_WORD

            eieio_header = EIEIODataHeader.from_bytestring(
                spike_data, data_offset)
            if eieio_header.eieio_type.payload_bytes > 0:
                raise Exception("Can only read spikes as keys")

            data_offset += eieio_header.size
            timestamps = numpy.repeat([time], eieio_header.count)
            key_bytes = eieio_header.eieio_type.key_bytes
            keys = numpy.frombuffer(spike_data,
                                    dtype="<u{}".format(key_bytes),
                                    count=eieio_header.count,
                                    offset=data_offset)

            neuron_ids = (keys - base_key) + vertex_slice.lo_atom
            offset += length + 2 * BYTES_PER_WORD
            results.append(numpy.dstack((neuron_ids, timestamps))[0])
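
The key-to-neuron mapping at the end of the loop is simple arithmetic over the EIEIO keys; a tiny sketch with assumed values (a base key of 0x10000 and a vertex slice starting at atom 64):

import numpy

base_key = 0x10000     # assumed base routing key for the slice
lo_atom = 64           # assumed first atom index of the vertex slice
keys = numpy.array([0x10002, 0x10007], dtype="<u4")   # invented spike keys

neuron_ids = (keys - base_key) + lo_atom
print(neuron_ids)   # -> [66 71]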
Example #5
    def __get_max_delay(self):
        if self.__max_delay is not None:
            return self.__max_delay

        # Find the maximum delay from incoming synapses
        app_vertex = self._governed_app_vertex
        max_delay_ms = 0
        for proj in app_vertex.incoming_projections:
            s_info = proj._synapse_information
            proj_max_delay = s_info.synapse_dynamics.get_delay_maximum(
                s_info.connector, s_info)
            max_delay_ms = max(max_delay_ms, proj_max_delay)
        max_delay_steps = math.ceil(max_delay_ms / machine_time_step_ms())
        max_delay_bits = get_n_bits(max_delay_steps)

        # Find the maximum possible delay
        n_atom_bits = get_n_bits(
            min(app_vertex.get_max_atoms_per_core(), app_vertex.n_atoms))
        n_synapse_bits = get_n_bits(
            app_vertex.neuron_impl.get_n_synapse_types())
        n_delay_bits = MAX_RING_BUFFER_BITS - (n_atom_bits + n_synapse_bits)

        # Pick the smallest between the two, so that not too many bits are used
        final_n_delay_bits = min(n_delay_bits, max_delay_bits)
        self.__max_delay = 2**final_n_delay_bits
        if self.__allow_delay_extension is None:
            self.__allow_delay_extension = max_delay_bits > final_n_delay_bits
        return self.__max_delay
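
A minimal sketch of the bit-budget trade-off in __get_max_delay, using assumed numbers (a 16-bit ring buffer index budget, 256 atoms per core, 2 synapse types, and a requested maximum of 144 delay steps) and a local stand-in for get_n_bits:

import math

MAX_RING_BUFFER_BITS = 16    # assumed total bits in the ring buffer index

def n_bits(value):
    # local stand-in for get_n_bits: bits needed to index `value` items
    return int(math.ceil(math.log2(value))) if value > 1 else 0

n_atom_bits = n_bits(256)                                              # 8
n_synapse_bits = n_bits(2)                                             # 1
n_delay_bits = MAX_RING_BUFFER_BITS - (n_atom_bits + n_synapse_bits)   # 7

max_delay_bits = n_bits(144)                                           # 8
final_n_delay_bits = min(n_delay_bits, max_delay_bits)                 # 7
max_delay = 2 ** final_n_delay_bits                                    # 128 steps
needs_extension = max_delay_bits > final_n_delay_bits                  # True
print(max_delay, needs_extension)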
Example #6
def get_maximum_delay_supported_in_ms(post_vertex_max_delay_ticks):
    """ Get the maximum delay supported by the synapse representation \
        before extensions are required, or None if any delay is supported

    :param int post_vertex_max_delay_ticks: post vertex max delay
    :rtype: int
    """
    return post_vertex_max_delay_ticks * machine_time_step_ms()
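
As a usage note: with an assumed 1 ms machine timestep, a post vertex reporting 16 supported delay ticks gives get_maximum_delay_supported_in_ms(16) == 16.0, i.e. delays up to 16 ms can be handled without a delay extension.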
Example #7
    def __init__(self,
                 alpha,
                 tau=default_parameters['tau'],
                 A_plus=0.01,
                 A_minus=0.01):
        r"""
        :param float alpha: :math:`\alpha`
        :param float tau: :math:`\tau`
        :param float A_plus: :math:`A^+`
        :param float A_minus: :math:`A^-`
        """
        self.__alpha = alpha
        self.__tau = tau
        self.__a_plus = A_plus
        self.__a_minus = A_minus

        self.__synapse_structure = SynapseStructureWeightOnly()

        self.__tau_data = get_exp_lut_array(machine_time_step_ms(), self.__tau)
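
The exact contents of get_exp_lut_array are internal to the library, but conceptually it tabulates the decay factor exp(-t/tau) at machine-timestep resolution for the plasticity rule. A rough standalone sketch of that idea (not the library's implementation), assuming a 1 ms timestep and tau = 20 ms:

import numpy

def exp_lut(time_step_ms, tau_ms, cutoff=0.001):
    # tabulate exp(-t / tau) for each timestep until it decays below `cutoff`
    n_entries = int(numpy.ceil(-tau_ms * numpy.log(cutoff) / time_step_ms))
    t = numpy.arange(n_entries) * time_step_ms
    return numpy.exp(-t / tau_ms)

lut = exp_lut(1.0, 20.0)     # assumed 1 ms timestep, tau = 20 ms
print(len(lut), lut[:3])     # ~139 entries; starts 1.0, 0.951, 0.905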
Example #8
    def __init__(self,
                 tau_plus=default_parameters['tau_plus'],
                 tau_minus=default_parameters['tau_minus'],
                 A_plus=0.01,
                 A_minus=0.01):
        r"""
        :param float tau_plus: :math:`\tau_+`
        :param float tau_minus: :math:`\tau_-`
        :param float A_plus: :math:`A^+`
        :param float A_minus: :math:`A^-`
        """
        self.__tau_plus = tau_plus
        self.__tau_minus = tau_minus
        self.__a_plus = A_plus
        self.__a_minus = A_minus

        self.__synapse_structure = SynapseStructureWeightOnly()

        ts = machine_time_step_ms()
        self.__tau_plus_data = get_exp_lut_array(ts, self.__tau_plus)
        self.__tau_minus_data = get_exp_lut_array(ts, self.__tau_minus)
Example #9
    def __init__(self, tau_plus, tau_minus, tau_x, tau_y, A_plus, A_minus):
        r"""
        :param float tau_plus: :math:`\tau_+`
        :param float tau_minus: :math:`\tau_-`
        :param float tau_x: :math:`\tau_x`
        :param float tau_y: :math:`\tau_y`
        :param float A_plus: :math:`A^+`
        :param float A_minus: :math:`A^-`
        """
        self.__tau_plus = tau_plus
        self.__tau_minus = tau_minus
        self.__tau_x = tau_x
        self.__tau_y = tau_y
        self.__a_plus = A_plus
        self.__a_minus = A_minus

        self.__synapse_structure = SynapseStructureWeightOnly()

        ts = machine_time_step_ms()
        self.__tau_plus_data = get_exp_lut_array(ts, self.__tau_plus)
        self.__tau_minus_data = get_exp_lut_array(ts, self.__tau_minus)
        self.__tau_x_data = get_exp_lut_array(ts, self.__tau_x, shift=2)
        self.__tau_y_data = get_exp_lut_array(ts, self.__tau_y, shift=2)
Example #10
    def __init__(self,
                 pre_synaptic_population,
                 post_synaptic_population,
                 connector,
                 synapse_type=None,
                 source=None,
                 receptor_type=None,
                 space=None,
                 label=None):
        """
        :param ~spynnaker.pyNN.models.populations.PopulationBase \
                pre_synaptic_population:
        :param ~spynnaker.pyNN.models.populations.PopulationBase \
                post_synaptic_population:
        :param AbstractConnector connector:
        :param AbstractSynapseDynamics synapse_type:
        :param None source: Unsupported; must be None
        :param str receptor_type:
        :param ~pyNN.space.Space space:
        :param str label:
        """
        # pylint: disable=too-many-arguments, too-many-locals
        if source is not None:
            raise NotImplementedError(
                "sPyNNaker {} does not yet support multi-compartmental "
                "cells.".format(__version__))

        sim = get_simulator()
        self.__projection_edge = None
        self.__host_based_synapse_list = None
        self.__has_retrieved_synaptic_list_from_machine = False
        self.__requires_mapping = True
        self.__label = label

        pre_is_view = self.__check_population(pre_synaptic_population,
                                              connector)
        post_is_view = self.__check_population(post_synaptic_population,
                                               connector)

        # set default label
        if label is None:
            # set the projection's label to a default (maybe non-unique!)
            self.__label = ("from pre {} to post {} with connector {}".format(
                pre_synaptic_population.label, post_synaptic_population.label,
                connector))
            # give an auto generated label for the underlying edge
            label = "projection edge {}".format(sim.none_labelled_edge_count)
            sim.increment_none_labelled_edge_count()

        # Handle default synapse type
        if synapse_type is None:
            synapse_dynamics = SynapseDynamicsStatic()
        else:
            synapse_dynamics = synapse_type

        # set the space function as required
        if space is None:
            space = PyNNSpace()
        connector.set_space(space)

        pre_vertex = pre_synaptic_population._vertex
        post_vertex = post_synaptic_population._vertex

        if not isinstance(post_vertex, AbstractAcceptsIncomingSynapses):
            raise ConfigurationException(
                "postsynaptic population is not designed to receive"
                " synaptic projections")

        # sort out synapse type
        synaptic_type = post_vertex.get_synapse_id_by_target(receptor_type)
        synapse_type_from_dynamics = False
        if synaptic_type is None:
            synaptic_type = synapse_dynamics.get_synapse_id_by_target(
                receptor_type)
            synapse_type_from_dynamics = True
        if synaptic_type is None:
            raise ConfigurationException(
                "Synapse target {} not found in {}".format(
                    receptor_type, post_synaptic_population.label))

        # as a from-list connector can have plastic parameters, grab those
        # (if any) and add them to the synapse dynamics object
        if isinstance(connector, FromListConnector):
            connector._apply_parameters_to_synapse_type(synaptic_type)

        # round the delays to multiples of full timesteps
        # (otherwise SDRAM estimation calculations can go wrong)
        if ((not isinstance(synapse_dynamics.delay, RandomDistribution))
                and (not isinstance(synapse_dynamics.delay, str))):
            synapse_dynamics.set_delay(
                numpy.rint(
                    numpy.array(synapse_dynamics.delay) *
                    machine_time_step_per_ms()) * machine_time_step_ms())

        # set the plasticity dynamics for the post population (allows
        # plasticity where needed)
        post_vertex.set_synapse_dynamics(synapse_dynamics)

        # get rng if needed
        rng = connector.rng if hasattr(connector, "rng") else None
        # Set and store synapse information for future processing
        self.__synapse_information = SynapseInformation(
            connector, pre_synaptic_population, post_synaptic_population,
            pre_is_view, post_is_view, rng, synapse_dynamics, synaptic_type,
            receptor_type, sim.use_virtual_board, synapse_type_from_dynamics,
            synapse_dynamics.weight, synapse_dynamics.delay)

        # Set projection information in connector
        connector.set_projection_information(self.__synapse_information)

        # Find out if there is an existing edge between the populations
        edge_to_merge = self._find_existing_edge(pre_vertex, post_vertex)
        if edge_to_merge is not None:

            # If there is an existing edge, add the connector
            edge_to_merge.add_synapse_information(self.__synapse_information)
            self.__projection_edge = edge_to_merge
        else:

            # If there isn't an existing edge, create a new one and add it
            self.__projection_edge = ProjectionApplicationEdge(
                pre_vertex,
                post_vertex,
                self.__synapse_information,
                label=label)
            sim.add_application_edge(self.__projection_edge,
                                     SPIKE_PARTITION_ID)

        # add projection to the SpiNNaker control system
        sim.add_projection(self)

        # If there is a virtual board, we need to hold the data in case the
        # user asks for it
        self.__virtual_connection_list = None
        if sim.use_virtual_board:
            self.__virtual_connection_list = list()
            connection_holder = ConnectionHolder(
                None, False, pre_vertex.n_atoms, post_vertex.n_atoms,
                self.__virtual_connection_list)

            self.__synapse_information.add_pre_run_connection_holder(
                connection_holder)

        # If the target is a population, add to the list of incoming
        # projections
        if isinstance(post_vertex, AbstractPopulationVertex):
            post_vertex.add_incoming_projection(self)

        # If the source is a Poisson source, add to the outgoing projections
        if isinstance(pre_vertex, SpikeSourcePoissonVertex):
            pre_vertex.add_outgoing_projection(self)
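
One detail from the constructor worth making concrete is the delay rounding: scalar or array delays are snapped to whole timesteps before being stored, so SDRAM estimates stay consistent. A minimal sketch with an assumed 1 ms timestep (so both conversion factors are 1.0):

import numpy

timesteps_per_ms = 1.0   # assumed machine_time_step_per_ms()
ms_per_timestep = 1.0    # assumed machine_time_step_ms()

delays_ms = numpy.array([1.2, 2.6, 3.7])   # requested delays in ms
rounded = numpy.rint(delays_ms * timesteps_per_ms) * ms_per_timestep
print(rounded)   # -> [1. 3. 4.]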