Example #1
    def __init__(
            self, n_neurons, label, constraints, port, virtual_key,
            spike_buffer_max_size, buffer_size_before_receive,
            time_between_requests, buffer_notification_ip_address,
            buffer_notification_port):
        # pylint: disable=too-many-arguments
        config = get_simulator().config
        if buffer_notification_ip_address is None:
            buffer_notification_ip_address = config.get(
                "Buffers", "receive_buffer_host")
        if buffer_notification_port is None:
            buffer_notification_port = config.getint(
                "Buffers", "receive_buffer_port")

        super(SpikeInjectorVertex, self).__init__(
            n_keys=n_neurons, label=label, receive_port=port,
            virtual_key=virtual_key, reserve_reverse_ip_tag=True,
            buffer_notification_ip_address=buffer_notification_ip_address,
            buffer_notification_port=buffer_notification_port,
            constraints=constraints)

        # Set up for recording
        self._spike_recorder = EIEIOSpikeRecorder()
        self._spike_buffer_max_size = spike_buffer_max_size
        if spike_buffer_max_size is None:
            self._spike_buffer_max_size = config.getint(
                "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = buffer_size_before_receive
        if buffer_size_before_receive is None:
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._time_between_requests = time_between_requests
        if time_between_requests is None:
            self._time_between_requests = config.getint(
                "Buffers", "time_between_requests")
Example #2
    def __init__(self, n_neurons, label, constraints, port, virtual_key,
                 reserve_reverse_ip_tag):
        # pylint: disable=too-many-arguments

        super(SpikeInjectorVertex,
              self).__init__(n_keys=n_neurons,
                             label=label,
                             receive_port=port,
                             virtual_key=virtual_key,
                             reserve_reverse_ip_tag=reserve_reverse_ip_tag,
                             constraints=constraints)

        # Set up for recording
        self._spike_recorder = EIEIOSpikeRecorder()
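
This constructor does little more than rename its PyNN-facing arguments and forward them to ReverseIpTagMultiCastSource. A toy sketch of the same delegation pattern; the classes here are stand-ins, not the sPyNNaker ones.

class _Source(object):
    # Stand-in for ReverseIpTagMultiCastSource: just records its kwargs.
    def __init__(self, n_keys, label, receive_port=None, virtual_key=None):
        self.n_keys = n_keys
        self.label = label
        self.receive_port = receive_port
        self.virtual_key = virtual_key

class _Injector(_Source):
    def __init__(self, n_neurons, label, port, virtual_key):
        # Translate the PyNN vocabulary into the base class's vocabulary.
        super().__init__(n_keys=n_neurons, label=label,
                         receive_port=port, virtual_key=virtual_key)

inj = _Injector(100, "spikeInjector", port=12345, virtual_key=None)
print(inj.n_keys, inj.receive_port)  # 100 12345
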
Example #3
    def __init__(self, n_neurons, label, constraints, port, virtual_key,
                 reserve_reverse_ip_tag, splitter):
        # pylint: disable=too-many-arguments
        self.__receive_port = None
        self.__virtual_key = None

        super().__init__(n_keys=n_neurons,
                         label=label,
                         receive_port=port,
                         virtual_key=virtual_key,
                         reserve_reverse_ip_tag=reserve_reverse_ip_tag,
                         constraints=constraints,
                         enable_injection=True,
                         splitter=splitter)

        # Set up for recording
        self.__spike_recorder = EIEIOSpikeRecorder()
Example #4
    def __init__(
            self, n_neurons, label, constraints, port, virtual_key,
            reserve_reverse_ip_tag):
        # pylint: disable=too-many-arguments

        super(SpikeInjectorVertex, self).__init__(
            n_keys=n_neurons, label=label, receive_port=port,
            virtual_key=virtual_key,
            reserve_reverse_ip_tag=reserve_reverse_ip_tag,
            constraints=constraints)

        # Set up for recording
        self._spike_recorder = EIEIOSpikeRecorder()
Example #5
    def __init__(
            self, n_neurons, spike_times, constraints, label,
            max_atoms_per_core, model):
        # pylint: disable=too-many-arguments
        self.__model_name = "SpikeSourceArray"
        self.__model = model

        if spike_times is None:
            spike_times = []
        self._spike_times = spike_times
        time_step = self.get_spikes_sampling_interval()

        super(SpikeSourceArrayVertex, self).__init__(
            n_keys=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=max_atoms_per_core,
            send_buffer_times=_send_buffer_times(spike_times, time_step),
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID)

        # handle recording
        self.__spike_recorder = EIEIOSpikeRecorder()

        # used for reset and rerun
        self.__requires_mapping = True
Example #6
    def __init__(
            self, n_neurons, spike_times, constraints, label,
            max_atoms_per_core, model):
        # pylint: disable=too-many-arguments
        self._model_name = "SpikeSourceArray"
        self._model = model

        if spike_times is None:
            spike_times = []
        self._spike_times = spike_times
        time_step = self.get_spikes_sampling_interval()

        super(SpikeSourceArrayVertex, self).__init__(
            n_keys=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=max_atoms_per_core,
            send_buffer_times=_send_buffer_times(spike_times, time_step),
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder()

        # used for reset and rerun
        self._requires_mapping = True
Example #7
class SpikeInjectorVertex(
        ReverseIpTagMultiCastSource,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractSpikeRecordable, SimplePopulationSettable):
    """ An Injector of Spikes for PyNN populations.  This only allows the user\
        to specify the virtual_key of the population to identify the population
    """
    __slots__ = [
        "_buffer_size_before_receive",
        "_receive_port",
        "_requires_mapping",
        "_spike_buffer_max_size",
        "_spike_recorder",
        "_time_between_requests",
        "_virtual_key"]

    default_parameters = {
        'label': "spikeInjector", 'port': None, 'virtual_key': None}

    SPIKE_RECORDING_REGION_ID = 0

    def __init__(
            self, n_neurons, label, constraints, port, virtual_key,
            spike_buffer_max_size, buffer_size_before_receive,
            time_between_requests, buffer_notification_ip_address,
            buffer_notification_port):
        # pylint: disable=too-many-arguments
        config = get_simulator().config
        if buffer_notification_ip_address is None:
            buffer_notification_ip_address = config.get(
                "Buffers", "receive_buffer_host")
        if buffer_notification_port is None:
            buffer_notification_port = config.getint(
                "Buffers", "receive_buffer_port")

        super(SpikeInjectorVertex, self).__init__(
            n_keys=n_neurons, label=label, receive_port=port,
            virtual_key=virtual_key, reserve_reverse_ip_tag=True,
            buffer_notification_ip_address=buffer_notification_ip_address,
            buffer_notification_port=buffer_notification_port,
            constraints=constraints)

        # Set up for recording
        self._spike_recorder = EIEIOSpikeRecorder()
        self._spike_buffer_max_size = spike_buffer_max_size
        if spike_buffer_max_size is None:
            self._spike_buffer_max_size = config.getint(
                "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = buffer_size_before_receive
        if buffer_size_before_receive is None:
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._time_between_requests = time_between_requests
        if time_between_requests is None:
            self._time_between_requests = config.getint(
                "Buffers", "time_between_requests")

    @property
    def port(self):
        return self._receive_port

    @port.setter
    def port(self, port):
        self._receive_port = port

    @property
    def virtual_key(self):
        return self._virtual_key

    @virtual_key.setter
    def virtual_key(self, virtual_key):
        self._virtual_key = virtual_key

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        return self._spike_recorder.record

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(
            self, new_state=True, sampling_interval=None, indexes=None):
        if sampling_interval is not None:
            logger.warning("Sampling interval is not currently supported "
                           "and will be ignored")
        if indexes is not None:
            logger.warning("Indexes are not currently supported "
                           "and will be ignored")
        self.enable_recording(
            self._spike_buffer_max_size, self._buffer_size_before_receive,
            self._time_between_requests)
        self._requires_mapping = not self._spike_recorder.record
        self._spike_recorder.record = new_state

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        return get_simulator().machine_time_step

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(
            self, placements, graph_mapper, buffer_manager, machine_time_step):
        return self._spike_recorder.get_spikes(
            self.label, buffer_manager,
            SpikeInjectorVertex.SPIKE_RECORDING_REGION_ID,
            placements, graph_mapper, self,
            lambda vertex:
                vertex.virtual_key
                if vertex.virtual_key is not None
                else 0,
            machine_time_step)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        machine_vertices = graph_mapper.get_machine_vertices(self)
        for machine_vertex in machine_vertices:
            placement = placements.get_placement_of_vertex(machine_vertex)
            buffer_manager.clear_recorded_data(
                placement.x, placement.y, placement.p,
                SpikeInjectorVertex.SPIKE_RECORDING_REGION_ID)

    @overrides(AbstractProvidesOutgoingPartitionConstraints.
               get_outgoing_partition_constraints)
    def get_outgoing_partition_constraints(self, partition):
        constraints = ReverseIpTagMultiCastSource\
            .get_outgoing_partition_constraints(self, partition)
        constraints.append(ContiguousKeyRangeContraint())
        return constraints

    def describe(self):
        """
        Returns a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template
        together with an associated template engine
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template context
        will be returned.
        """

        parameters = dict()
        for parameter_name in self.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": "SpikeInjector",
            "default_parameters": self.default_parameters,
            "default_initial_values": self.default_parameters,
            "parameters": parameters,
        }
        return context
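
describe() only assembles a context dictionary; pyNN.descriptions can then hand it to a template engine. As a rough stand-in, string.Template from the standard library shows how such a context might be rendered; the template text here is invented, and pyNN's real templates are richer.

from string import Template

context = {
    "name": "SpikeInjector",
    "parameters": {"label": "spikeInjector", "port": None,
                   "virtual_key": None},
}

# A made-up one-line template, purely for illustration.
template = Template("$name with parameters $parameters")
print(template.substitute(name=context["name"],
                          parameters=context["parameters"]))
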
Example #8
class SpikeInjectorVertex(
        ReverseIpTagMultiCastSource,
        AbstractProvidesOutgoingPartitionConstraints,
        AbstractSpikeRecordable, SimplePopulationSettable):
    """ An Injector of Spikes for PyNN populations.  This only allows the user\
        to specify the virtual_key of the population to identify the population
    """
    __slots__ = [
        "_receive_port",
        "_requires_mapping",
        "_spike_recorder",
        "_virtual_key"]

    default_parameters = {
        'label': "spikeInjector", 'port': None, 'virtual_key': None}

    SPIKE_RECORDING_REGION_ID = 0

    def __init__(
            self, n_neurons, label, constraints, port, virtual_key,
            reserve_reverse_ip_tag):
        # pylint: disable=too-many-arguments

        super(SpikeInjectorVertex, self).__init__(
            n_keys=n_neurons, label=label, receive_port=port,
            virtual_key=virtual_key,
            reserve_reverse_ip_tag=reserve_reverse_ip_tag,
            constraints=constraints)

        # Set up for recording
        self._spike_recorder = EIEIOSpikeRecorder()

    @property
    def port(self):
        return self._receive_port

    @port.setter
    def port(self, port):
        self._receive_port = port

    @property
    def virtual_key(self):
        return self._virtual_key

    @virtual_key.setter
    def virtual_key(self, virtual_key):
        self._virtual_key = virtual_key

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        return self._spike_recorder.record

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(
            self, new_state=True, sampling_interval=None, indexes=None):
        if sampling_interval is not None:
            logger.warning("Sampling interval is not currently supported "
                           "and will be ignored")
        if indexes is not None:
            logger.warning("Indexes are not currently supported "
                           "and will be ignored")
        self.enable_recording(new_state)
        self._requires_mapping = not self._spike_recorder.record
        self._spike_recorder.record = new_state

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        return get_simulator().machine_time_step

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(
            self, placements, graph_mapper, buffer_manager, machine_time_step):
        return self._spike_recorder.get_spikes(
            self.label, buffer_manager,
            SpikeInjectorVertex.SPIKE_RECORDING_REGION_ID,
            placements, graph_mapper, self,
            lambda vertex:
                vertex.virtual_key
                if vertex.virtual_key is not None
                else 0,
            machine_time_step)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        machine_vertices = graph_mapper.get_machine_vertices(self)
        for machine_vertex in machine_vertices:
            placement = placements.get_placement_of_vertex(machine_vertex)
            buffer_manager.clear_recorded_data(
                placement.x, placement.y, placement.p,
                SpikeInjectorVertex.SPIKE_RECORDING_REGION_ID)

    @overrides(AbstractProvidesOutgoingPartitionConstraints.
               get_outgoing_partition_constraints)
    def get_outgoing_partition_constraints(self, partition):
        constraints = ReverseIpTagMultiCastSource\
            .get_outgoing_partition_constraints(self, partition)
        constraints.append(ContiguousKeyRangeContraint())
        return constraints

    def describe(self):
        """
        Returns a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template
        together with an associated template engine
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template context
        will be returned.
        """

        parameters = dict()
        for parameter_name in self.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": "SpikeInjector",
            "default_parameters": self.default_parameters,
            "default_initial_values": self.default_parameters,
            "parameters": parameters,
        }
        return context
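
One subtlety in set_recording_spikes above: _requires_mapping is derived from the recorder's previous state, so the vertex is flagged for remapping only when recording was previously off (switching recording on changes what the vertex needs on the machine). A stripped-down sketch of that state transition, with illustrative names:

class _RecordingState(object):
    def __init__(self):
        self.record = False
        self.requires_mapping = True

    def set_recording(self, new_state=True):
        # Flag a remap whenever recording was previously off; a vertex
        # that was already recording keeps its existing mapping.
        self.requires_mapping = not self.record
        self.record = new_state

state = _RecordingState()
state.set_recording(True)
print(state.requires_mapping)  # True: recording was off before
state.set_recording(True)
print(state.requires_mapping)  # False: it was already recording
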
Example #9
class SpikeSourceArrayVertex(
        ReverseIpTagMultiCastSource, AbstractSpikeRecordable,
        SimplePopulationSettable, AbstractChangableAfterRun,
        ProvidesKeyToAtomMappingImpl):
    """ Model for play back of spikes
    """

    SPIKE_RECORDING_REGION_ID = 0

    def __init__(
            self, n_neurons, spike_times, constraints, label,
            max_atoms_per_core, model):
        # pylint: disable=too-many-arguments
        self.__model_name = "SpikeSourceArray"
        self.__model = model

        if spike_times is None:
            spike_times = []
        self._spike_times = spike_times
        time_step = self.get_spikes_sampling_interval()

        super(SpikeSourceArrayVertex, self).__init__(
            n_keys=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=max_atoms_per_core,
            send_buffer_times=_send_buffer_times(spike_times, time_step),
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID)

        # handle recording
        self.__spike_recorder = EIEIOSpikeRecorder()

        # used for reset and rerun
        self.__requires_mapping = True

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        return self.__requires_mapping

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        self.__requires_mapping = False

    @property
    def spike_times(self):
        """ The spike times of the spike source array
        """
        return self._spike_times

    @spike_times.setter
    def spike_times(self, spike_times):
        """ Set the spike source array's spike times. Not an extend, but an\
            actual change

        """
        time_step = self.get_spikes_sampling_interval()
        self.send_buffer_times = _send_buffer_times(spike_times, time_step)
        self._spike_times = spike_times

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        return self.__spike_recorder.record

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(
            self, new_state=True, sampling_interval=None, indexes=None):
        if sampling_interval is not None:
            logger.warning("Sampling interval is not currently supported "
                           "for SpikeSourceArray and will be ignored")
        if indexes is not None:
            logger.warning("Indexes are not currently supported for "
                           "SpikeSourceArray and will be ignored")
        self.enable_recording(new_state)
        self.__requires_mapping = not self.__spike_recorder.record
        self.__spike_recorder.record = new_state

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        return globals_variables.get_simulator().machine_time_step

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(
            self, placements, graph_mapper, buffer_manager, machine_time_step):
        return self.__spike_recorder.get_spikes(
            self.label, buffer_manager, 0,
            placements, graph_mapper, self,
            lambda vertex:
                vertex.virtual_key
                if vertex.virtual_key is not None
                else 0,
            machine_time_step)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        machine_vertices = graph_mapper.get_machine_vertices(self)
        for machine_vertex in machine_vertices:
            placement = placements.get_placement_of_vertex(machine_vertex)
            buffer_manager.clear_recorded_data(
                placement.x, placement.y, placement.p,
                SpikeSourceArrayVertex.SPIKE_RECORDING_REGION_ID)

    @staticmethod
    def set_model_max_atoms_per_core(new_value=sys.maxsize):
        SpikeSourceArrayVertex._model_based_max_atoms_per_core = new_value

    def describe(self):
        """ Returns a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template\
        context will be returned.
        """

        parameters = dict()
        for parameter_name in self.__model.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": self.__model_name,
            "default_parameters": self.__model.default_parameters,
            "default_initial_values": self.__model.default_parameters,
            "parameters": parameters,
        }
        return context
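
_send_buffer_times is an internal helper not shown in these examples. Assuming it converts millisecond spike times into integer machine time steps (the machine time step being in microseconds), a plausible stand-in could look like the following; this is a guess at the behaviour, not sPyNNaker's implementation.

def send_buffer_times(spike_times, machine_time_step_us):
    # Hypothetical conversion from milliseconds to integer time steps.
    ms_per_step = machine_time_step_us / 1000.0
    if spike_times and hasattr(spike_times[0], "__iter__"):
        # One list of spike times per neuron.
        return [[int(round(t / ms_per_step)) for t in times]
                for times in spike_times]
    return [int(round(t / ms_per_step)) for t in spike_times]

print(send_buffer_times([1.0, 2.0, 10.0], 1000))  # [1, 2, 10]
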
Example #10
    def __init__(
            self, n_neurons,
            spike_times=default_parameters['spike_times'],
            port=non_pynn_default_parameters['port'],
            tag=non_pynn_default_parameters['tag'],
            ip_address=non_pynn_default_parameters['ip_address'],
            board_address=non_pynn_default_parameters['board_address'],
            max_on_chip_memory_usage_for_spikes_in_bytes=DEFAULT1,
            space_before_notification=non_pynn_default_parameters[
                'space_before_notification'],
            constraints=non_pynn_default_parameters['constraints'],
            label=non_pynn_default_parameters['label'],
            spike_recorder_buffer_size=non_pynn_default_parameters[
                'spike_recorder_buffer_size'],
            buffer_size_before_receive=non_pynn_default_parameters[
                'buffer_size_before_receive']):
        # pylint: disable=too-many-arguments
        self._model_name = "SpikeSourceArray"

        config = globals_variables.get_simulator().config
        self._ip_address = ip_address
        if ip_address is None:
            self._ip_address = config.get("Buffers", "receive_buffer_host")
        self._port = port
        if port is None:
            self._port = helpful_functions.read_config_int(
                config, "Buffers", "receive_buffer_port")
        if spike_times is None:
            spike_times = []

        super(SpikeSourceArray, self).__init__(
            n_keys=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=(
                SpikeSourceArray._model_based_max_atoms_per_core),
            board_address=board_address,
            receive_port=None, receive_tag=None,
            virtual_key=None, prefix=None, prefix_type=None, check_keys=False,
            send_buffer_times=spike_times,
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID,
            send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
            send_buffer_space_before_notify=space_before_notification,
            buffer_notification_ip_address=self._ip_address,
            buffer_notification_port=self._port,
            buffer_notification_tag=tag)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder()
        self._spike_recorder_buffer_size = spike_recorder_buffer_size
        self._buffer_size_before_receive = buffer_size_before_receive

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
        self._spike_recording_region_size = None
        self._machine_vertices = list()

        # used for reset and rerun
        self._requires_mapping = True
        self._last_runtime_position = 0

        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification
        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = \
                MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise exceptions.ConfigurationException(
                "The requested on-chip memory usage for spikes is either"
                " beyond what the SpiNNaker board can support or is a"
                " negative value. Please correct the value and try again")

        if (self._max_on_chip_memory_usage_for_spikes <
                self._space_before_notification):
            self._space_before_notification =\
                self._max_on_chip_memory_usage_for_spikes
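
The tail of this constructor validates the buffering parameters: a None limit falls back to an on-chip maximum, a negative limit is rejected, and the notification threshold is clamped so it never exceeds the limit. The same logic in isolation, with the constant and exception type as stand-ins:

MAX_BUFFERED_REGION = 1024 * 1024  # stand-in for the on-chip maximum

def check_buffer_limits(max_for_spikes, space_before_notification):
    if max_for_spikes is None:
        max_for_spikes = MAX_BUFFERED_REGION
    if max_for_spikes < 0:
        raise ValueError("memory usage for spikes must be non-negative")
    # Never set the notification threshold beyond what the buffer holds.
    if max_for_spikes < space_before_notification:
        space_before_notification = max_for_spikes
    return max_for_spikes, space_before_notification

print(check_buffer_limits(None, 640))  # (1048576, 640)
print(check_buffer_limits(512, 640))   # (512, 512)
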
Example #11
class SpikeSourceArray(
        ReverseIpTagMultiCastSource, AbstractSpikeRecordable,
        SimplePopulationSettable, AbstractChangableAfterRun,
        ProvidesKeyToAtomMappingImpl):
    """ Model for play back of spikes
    """

    _model_based_max_atoms_per_core = sys.maxint

    # parameters expected by pynn
    default_parameters = {
        'spike_times': None
    }

    # parameters expected by spinnaker
    non_pynn_default_parameters = {
        'port': None, 'tag': None, 'ip_address': None, 'board_address': None,
        'max_on_chip_memory_usage_for_spikes_in_bytes': (
            constants.SPIKE_BUFFER_SIZE_BUFFERING_IN),
        'space_before_notification': 640, 'constraints': None, 'label': None,
        'spike_recorder_buffer_size': (
            constants.EIEIO_SPIKE_BUFFER_SIZE_BUFFERING_OUT),
        'buffer_size_before_receive': (
            constants.EIEIO_BUFFER_SIZE_BEFORE_RECEIVE)}

    SPIKE_RECORDING_REGION_ID = 0

    # Needed to get long names past pep8
    DEFAULT1 = non_pynn_default_parameters[
        'max_on_chip_memory_usage_for_spikes_in_bytes']

    def __init__(
            self, n_neurons,
            spike_times=default_parameters['spike_times'],
            port=non_pynn_default_parameters['port'],
            tag=non_pynn_default_parameters['tag'],
            ip_address=non_pynn_default_parameters['ip_address'],
            board_address=non_pynn_default_parameters['board_address'],
            max_on_chip_memory_usage_for_spikes_in_bytes=DEFAULT1,
            space_before_notification=non_pynn_default_parameters[
                'space_before_notification'],
            constraints=non_pynn_default_parameters['constraints'],
            label=non_pynn_default_parameters['label'],
            spike_recorder_buffer_size=non_pynn_default_parameters[
                'spike_recorder_buffer_size'],
            buffer_size_before_receive=non_pynn_default_parameters[
                'buffer_size_before_receive']):
        # pylint: disable=too-many-arguments
        self._model_name = "SpikeSourceArray"

        config = globals_variables.get_simulator().config
        self._ip_address = ip_address
        if ip_address is None:
            self._ip_address = config.get("Buffers", "receive_buffer_host")
        self._port = port
        if port is None:
            self._port = helpful_functions.read_config_int(
                config, "Buffers", "receive_buffer_port")
        if spike_times is None:
            spike_times = []

        super(SpikeSourceArray, self).__init__(
            n_keys=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=(
                SpikeSourceArray._model_based_max_atoms_per_core),
            board_address=board_address,
            receive_port=None, receive_tag=None,
            virtual_key=None, prefix=None, prefix_type=None, check_keys=False,
            send_buffer_times=spike_times,
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID,
            send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
            send_buffer_space_before_notify=space_before_notification,
            buffer_notification_ip_address=self._ip_address,
            buffer_notification_port=self._port,
            buffer_notification_tag=tag)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder()
        self._spike_recorder_buffer_size = spike_recorder_buffer_size
        self._buffer_size_before_receive = buffer_size_before_receive

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
        self._spike_recording_region_size = None
        self._machine_vertices = list()

        # used for reset and rerun
        self._requires_mapping = True
        self._last_runtime_position = 0

        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification
        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = \
                MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise exceptions.ConfigurationException(
                "The requested on-chip memory usage for spikes is either"
                " beyond what the SpiNNaker board can support or is a"
                " negative value. Please correct the value and try again")

        if (self._max_on_chip_memory_usage_for_spikes <
                self._space_before_notification):
            self._space_before_notification =\
                self._max_on_chip_memory_usage_for_spikes

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        return self._requires_mapping

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        self._requires_mapping = False

    @property
    def spike_times(self):
        """ The spike times of the spike source array
        """
        return self.send_buffer_times

    @spike_times.setter
    def spike_times(self, spike_times):
        """ Set the spike source array's spike times. Not an extend, but an\
            actual change

        """
        self.send_buffer_times = spike_times

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        return self._spike_recorder.record

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(
            self, new_state=True, sampling_interval=None, indexes=None):
        if sampling_interval is not None:
            logger.warning("Sampling interval is not currently supported "
                           "for SpikeSourceArray and will be ignored")
        if indexes is not None:
            logger.warning("Indexes are not currently supported for "
                           "SpikeSourceArray and will be ignored")
        self.enable_recording(
            self._spike_recorder_buffer_size,
            self._buffer_size_before_receive)
        self._requires_mapping = not self._spike_recorder.record
        self._spike_recorder.record = new_state

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        return globals_variables.get_simulator().machine_time_step

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(
            self, placements, graph_mapper, buffer_manager, machine_time_step):

        return self._spike_recorder.get_spikes(
            self.label, buffer_manager, 0,
            placements, graph_mapper, self,
            lambda vertex:
                vertex.virtual_key
                if vertex.virtual_key is not None
                else 0,
            machine_time_step)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        machine_vertices = graph_mapper.get_machine_vertices(self)
        for machine_vertex in machine_vertices:
            placement = placements.get_placement_of_vertex(machine_vertex)
            buffer_manager.clear_recorded_data(
                placement.x, placement.y, placement.p,
                SpikeSourceArray.SPIKE_RECORDING_REGION_ID)

    @staticmethod
    def set_model_max_atoms_per_core(new_value=sys.maxint):
        SpikeSourceArray._model_based_max_atoms_per_core = new_value

    @staticmethod
    def get_max_atoms_per_core():
        return SpikeSourceArray._model_based_max_atoms_per_core

    def describe(self):
        """ Returns a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template\
        context will be returned.
        """

        parameters = dict()
        for parameter_name in self.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": self._model_name,
            "default_parameters": self.default_parameters,
            "default_initial_values": self.default_parameters,
            "parameters": parameters,
        }
        return context
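
Examples 10 and 11 cap population size through the class attribute _model_based_max_atoms_per_core, mutated via a static method so the limit applies to every instance of the model. A stripped-down version of the pattern, using a stand-in class:

import sys

class _Model(object):
    _max_atoms_per_core = sys.maxsize  # class-wide default

    @staticmethod
    def set_max_atoms_per_core(new_value=sys.maxsize):
        # Changes the limit for the class as a whole, not per instance.
        _Model._max_atoms_per_core = new_value

    @staticmethod
    def get_max_atoms_per_core():
        return _Model._max_atoms_per_core

_Model.set_max_atoms_per_core(256)
print(_Model.get_max_atoms_per_core())  # 256
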
Example #12
class SpikeSourceArrayVertex(
        ReverseIpTagMultiCastSource, AbstractSpikeRecordable,
        SimplePopulationSettable, AbstractChangableAfterRun,
        ProvidesKeyToAtomMappingImpl):
    """ Model for play back of spikes
    """

    SPIKE_RECORDING_REGION_ID = 0

    def __init__(
            self, n_neurons, spike_times, constraints, label,
            max_atoms_per_core, model):
        # pylint: disable=too-many-arguments
        self._model_name = "SpikeSourceArray"
        self._model = model

        if spike_times is None:
            spike_times = []
        self._spike_times = spike_times
        time_step = self.get_spikes_sampling_interval()

        super(SpikeSourceArrayVertex, self).__init__(
            n_keys=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=max_atoms_per_core,
            send_buffer_times=_send_buffer_times(spike_times, time_step),
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder()

        # used for reset and rerun
        self._requires_mapping = True

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        return self._requires_mapping

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        self._requires_mapping = False

    @property
    def spike_times(self):
        """ The spike times of the spike source array
        """
        return self._spike_times

    @spike_times.setter
    def spike_times(self, spike_times):
        """ Set the spike source array's spike times. Not an extend, but an\
            actual change

        """
        time_step = self.get_spikes_sampling_interval()
        self.send_buffer_times = _send_buffer_times(spike_times, time_step)
        self._spike_times = spike_times

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        return self._spike_recorder.record

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(
            self, new_state=True, sampling_interval=None, indexes=None):
        if sampling_interval is not None:
            logger.warning("Sampling interval is not currently supported "
                           "for SpikeSourceArray and will be ignored")
        if indexes is not None:
            logger.warning("Indexes are not currently supported for "
                           "SpikeSourceArray and will be ignored")
        self.enable_recording(new_state)
        self._requires_mapping = not self._spike_recorder.record
        self._spike_recorder.record = new_state

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        return globals_variables.get_simulator().machine_time_step

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(
            self, placements, graph_mapper, buffer_manager, machine_time_step):

        return self._spike_recorder.get_spikes(
            self.label, buffer_manager, 0,
            placements, graph_mapper, self,
            lambda vertex:
                vertex.virtual_key
                if vertex.virtual_key is not None
                else 0,
            machine_time_step)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements, graph_mapper):
        machine_vertices = graph_mapper.get_machine_vertices(self)
        for machine_vertex in machine_vertices:
            placement = placements.get_placement_of_vertex(machine_vertex)
            buffer_manager.clear_recorded_data(
                placement.x, placement.y, placement.p,
                SpikeSourceArrayVertex.SPIKE_RECORDING_REGION_ID)

    @staticmethod
    def set_model_max_atoms_per_core(new_value=sys.maxsize):
        SpikeSourceArrayVertex._model_based_max_atoms_per_core = new_value

    def describe(self):
        """ Returns a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template\
        context will be returned.
        """

        parameters = dict()
        for parameter_name in self._model.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": self._model_name,
            "default_parameters": self._model.default_parameters,
            "default_initial_values": self._model.default_parameters,
            "parameters": parameters,
        }
        return context
Example #13
class SpikeSourceArrayVertex(
        ReverseIpTagMultiCastSource, AbstractSpikeRecordable,
        SimplePopulationSettable, AbstractChangableAfterRun,
        ProvidesKeyToAtomMappingImpl):
    """ Model for play back of spikes
    """

    SPIKE_RECORDING_REGION_ID = 0

    def __init__(
            self, n_neurons, spike_times, constraints, label,
            max_atoms_per_core, model, splitter):
        # pylint: disable=too-many-arguments
        self.__model_name = "SpikeSourceArray"
        self.__model = model
        if spike_times is None:
            spike_times = []
        self._spike_times = spike_times
        time_step = self.get_spikes_sampling_interval()

        super().__init__(
            n_keys=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=max_atoms_per_core,
            send_buffer_times=_send_buffer_times(spike_times, time_step),
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID,
            splitter=splitter)

        # handle recording
        self.__spike_recorder = EIEIOSpikeRecorder()

        # used for reset and rerun
        self.__requires_mapping = True

    @property
    @overrides(AbstractChangableAfterRun.requires_mapping)
    def requires_mapping(self):
        return self.__requires_mapping

    @overrides(AbstractChangableAfterRun.mark_no_changes)
    def mark_no_changes(self):
        self.__requires_mapping = False

    @property
    def spike_times(self):
        """ The spike times of the spike source array
        """
        return list(self._spike_times)

    def _to_early_spikes_single_list(self, spike_times):
        """
        Checks whether one or more spike times fall before the current time.

        Logs a warning for the first one found.

        :param iterable(int) spike_times:
        """
        current_time = get_simulator().get_current_time()
        for i in range(len(spike_times)):
            if spike_times[i] < current_time:
                logger.warning(
                    "SpikeSourceArray {} has spike_times that are lower than "
                    "the current time {} For example {} - "
                    "these will be ignored.".format(
                        self, current_time, float(spike_times[i])))
                return

    def _check_spikes_double_list(self, spike_times):
        """
        Checks whether one or more spike times fall before the current time.

        Logs a warning for the first one found.

        :param iterable(iterable(int)) spike_times:
        """
        current_time = get_simulator().get_current_time()
        for neuron_id in range(0, self.n_atoms):
            id_times = spike_times[neuron_id]
            for i in range(len(id_times)):
                if id_times[i] < current_time:
                    logger.warning(
                        "SpikeSourceArray {} has spike_times that are lower "
                        "than the current time {} (for example, {}); "
                        "these will be ignored.".format(
                            self, current_time, float(id_times[i])))
                    return

    @spike_times.setter
    def spike_times(self, spike_times):
        """ Set the spike source array's spike times. Not an extend, but an\
            actual change

        """
        time_step = self.get_spikes_sampling_interval()
        # warn the user if they are asking for a spike time out of range
        if spike_times:  # in case of empty list do not check
            if hasattr(spike_times[0], '__iter__'):
                self._check_spikes_double_list(spike_times)
            else:
                self._to_early_spikes_single_list(spike_times)
        self.send_buffer_times = _send_buffer_times(spike_times, time_step)
        self._spike_times = spike_times

    @overrides(AbstractSpikeRecordable.is_recording_spikes)
    def is_recording_spikes(self):
        return self.__spike_recorder.record

    @overrides(AbstractSpikeRecordable.set_recording_spikes)
    def set_recording_spikes(
            self, new_state=True, sampling_interval=None, indexes=None):
        if sampling_interval is not None:
            logger.warning("Sampling interval is not currently supported "
                           "for SpikeSourceArray and will be ignored")
        if indexes is not None:
            logger.warning("Indexes are not currently supported for "
                           "SpikeSourceArray and will be ignored")
        self.enable_recording(new_state)
        self.__requires_mapping = not self.__spike_recorder.record
        self.__spike_recorder.record = new_state

    @overrides(AbstractSpikeRecordable.get_spikes_sampling_interval)
    def get_spikes_sampling_interval(self):
        return machine_time_step()

    @overrides(AbstractSpikeRecordable.get_spikes)
    def get_spikes(self, placements, buffer_manager):
        return self.__spike_recorder.get_spikes(
            self.label, buffer_manager, 0, placements, self,
            lambda vertex:
                vertex.virtual_key
                if vertex.virtual_key is not None
                else 0)

    @overrides(AbstractSpikeRecordable.clear_spike_recording)
    def clear_spike_recording(self, buffer_manager, placements):
        for machine_vertex in self.machine_vertices:
            placement = placements.get_placement_of_vertex(machine_vertex)
            buffer_manager.clear_recorded_data(
                placement.x, placement.y, placement.p,
                SpikeSourceArrayVertex.SPIKE_RECORDING_REGION_ID)

    def describe(self):
        """ Returns a human-readable description of the cell or synapse type.

        The output may be customised by specifying a different template\
        together with an associated template engine\
        (see :py:mod:`pyNN.descriptions`).

        If template is None, then a dictionary containing the template\
        context will be returned.
        """

        parameters = dict()
        for parameter_name in self.__model.default_parameters:
            parameters[parameter_name] = self.get_value(parameter_name)

        context = {
            "name": self.__model_name,
            "default_parameters": self.__model.default_parameters,
            "default_initial_values": self.__model.default_parameters,
            "parameters": parameters,
        }
        return context

    @overrides(SimplePopulationSettable.set_value_by_selector)
    def set_value_by_selector(self, selector, key, value):
        if key == "spike_times":
            old_values = self.get_value(key)
            if isinstance(old_values, RangedListOfList):
                ranged_list = old_values
            else:
                # Keep all the setting logic in one place by wrapping the
                # old values in a RangedListOfList
                ranged_list = RangedListOfList(
                    size=self.n_atoms, value=old_values)
            ranged_list.set_value_by_selector(
                selector, value, ranged_list.is_list(value, self.n_atoms))
            self.set_value(key, ranged_list)
        else:
            SimplePopulationSettable.set_value_by_selector(
                self, selector, key, value)
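
The spike_times setter in this last example decides between the single-list and per-neuron checks by probing the first element for __iter__. That dispatch in isolation:

def describe_shape(spike_times):
    # Empty input: nothing to check.
    if not spike_times:
        return "empty"
    # A nested sequence means one list of spike times per neuron.
    if hasattr(spike_times[0], "__iter__"):
        return "per-neuron lists"
    return "single shared list"

print(describe_shape([]))                   # empty
print(describe_shape([1.0, 2.0, 3.0]))      # single shared list
print(describe_shape([[1.0], [2.0, 3.0]]))  # per-neuron lists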