Example #1
    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            constraints=None, label="SpikeSourcePoisson", rate=1.0, start=0.0,
            duration=None, seed=None):
        AbstractPartitionableVertex.__init__(
            self, n_atoms=n_neurons, label=label, constraints=constraints,
            max_atoms_per_core=self._model_based_max_atoms_per_core)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        ReceiveBuffersToHostBasicImpl.__init__(self)
        AbstractProvidesOutgoingEdgeConstraints.__init__(self)
        PopulationSettableChangeRequiresMapping.__init__(self)

        # Store the parameters
        self._rate = rate
        self._start = start
        self._duration = duration
        self._rng = numpy.random.RandomState(seed)

        # Prepare for recording, and to get spikes
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")
Example #2
    def __init__(
            self, resources_required, label, is_recording, constraints=None):
        PartitionedVertex.__init__(
            self, resources_required, label, constraints)
        ReceiveBuffersToHostBasicImpl.__init__(self)
        ProvidesProvenanceDataFromMachineImpl.__init__(
            self, constants.POPULATION_BASED_REGIONS.PROVENANCE_DATA.value,
            self.N_ADDITIONAL_PROVENANCE_DATA_ITEMS)
        AbstractRecordable.__init__(self)
        self._is_recording = is_recording
Example #3
    def __init__(
            self, resources_required, label, is_recording, constraints=None):
        PartitionedVertex.__init__(
            self, resources_required, label, constraints=constraints)
        ReceiveBuffersToHostBasicImpl.__init__(self)
        ProvidesProvenanceDataFromMachineImpl.__init__(
            self, self._POISSON_SPIKE_SOURCE_REGIONS.PROVENANCE_REGION.value,
            0)
        AbstractRecordable.__init__(self)
        self._is_recording = is_recording
Example #4
    def __init__(self,
                 resources_required,
                 label,
                 is_recording,
                 constraints=None):
        PartitionedVertex.__init__(self, resources_required, label,
                                   constraints)
        ReceiveBuffersToHostBasicImpl.__init__(self)
        ProvidesProvenanceDataFromMachineImpl.__init__(
            self, constants.POPULATION_BASED_REGIONS.PROVENANCE_DATA.value,
            self.N_ADDITIONAL_PROVENANCE_DATA_ITEMS)
        AbstractRecordable.__init__(self)
        self._is_recording = is_recording
Example #5
    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        sdram_requirement = (
            self._get_sdram_usage_for_neuron_params(vertex_slice) +
            ReceiveBuffersToHostBasicImpl.get_buffer_state_region_size(3) +
            PopulationPartitionedVertex.get_provenance_data_size(
                PopulationPartitionedVertex.N_ADDITIONAL_PROVENANCE_DATA_ITEMS)
            + self._synapse_manager.get_sdram_usage_in_bytes(
                vertex_slice, graph.incoming_edges_to_vertex(self)) +
            (self._get_number_of_mallocs_used_by_dsg(
                vertex_slice, graph.incoming_edges_to_vertex(self)) *
             common_constants.SARK_PER_MALLOC_SDRAM_USAGE))

        # add recording SDRAM if not automatically calculated
        if not self._using_auto_pause_and_resume:
            spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            v_buffer_size = self._v_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            gsyn_buffer_size = self._gsyn_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._spike_buffer_max_size, spike_buffer_size,
                self._enable_buffered_recording)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._v_buffer_max_size, v_buffer_size,
                self._enable_buffered_recording)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._gsyn_buffer_max_size, gsyn_buffer_size,
                self._enable_buffered_recording)
        else:
            sdram_requirement += self._minimum_buffer_sdram

        return sdram_requirement
Example #6
    def _get_sdram_usage_for_neuron_params(self, vertex_slice):
        per_neuron_usage = (
            self._input_type.get_sdram_usage_per_neuron_in_bytes() +
            self._threshold_type.get_sdram_usage_per_neuron_in_bytes())
        if self._additional_input is not None:
            per_neuron_usage += \
                self._additional_input.get_sdram_usage_per_neuron_in_bytes()
        return (
            (common_constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
            ReceiveBuffersToHostBasicImpl.get_recording_data_size(3) +
            (per_neuron_usage * vertex_slice.n_atoms) +
            self._neuron_model.get_sdram_usage_in_bytes(vertex_slice.n_atoms))
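
Examples #5 and #6 follow the same pattern: a fixed per-core overhead plus a per-neuron cost scaled by the number of atoms in the slice. A minimal stand-alone sketch of that arithmetic (the names and byte counts below are illustrative assumptions, not the library's real values):

    def estimate_sdram_bytes(n_atoms, setup_words=32, per_neuron_bytes=20):
        # fixed setup region (words -> bytes) plus per-neuron parameter storage
        return (setup_words * 4) + (per_neuron_bytes * n_atoms)

    print(estimate_sdram_bytes(100))  # 128 + 2000 = 2128 bytes
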
Example #7
    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        poisson_params_sz = self.get_params_bytes(vertex_slice)
        total_size = \
            ((front_end_common_constants.
              DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
             ReceiveBuffersToHostBasicImpl.get_recording_data_size(1) +
             ReceiveBuffersToHostBasicImpl.get_buffer_state_region_size(1) +
             SpikeSourcePoissonPartitionedVertex.get_provenance_data_size(0) +
             poisson_params_sz)
        total_size += self._get_number_of_mallocs_used_by_dsg(
            vertex_slice, graph.incoming_edges_to_vertex(self)) * \
            front_end_common_constants.SARK_PER_MALLOC_SDRAM_USAGE

        if self._using_auto_pause_and_resume:
            total_size += self._minimum_buffer_sdram
        else:
            spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            total_size += recording_utils.get_buffer_sizes(
                self._spike_buffer_max_size, spike_buffer_size,
                self._enable_buffered_recording)

        return total_size
Example #8
    def __init__(
            self, n_keys, resources_required, machine_time_step,
            timescale_factor, label, constraints=None,

            # General input and output parameters
            board_address=None,

            # Live input parameters
            receive_port=None,
            receive_sdp_port=(
                constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value),
            receive_tag=None,

            # Key parameters
            virtual_key=None, prefix=None,
            prefix_type=None, check_keys=False,

            # Send buffer parameters
            send_buffer_times=None,
            send_buffer_max_space=(
                constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP),
            send_buffer_space_before_notify=640,
            send_buffer_notification_ip_address=None,
            send_buffer_notification_port=None,
            send_buffer_notification_tag=None):
        """

        :param n_keys: The number of keys to be sent via this multicast source
        :param resources_required: The resources required by the vertex
        :param machine_time_step: The time step to be used on the machine
        :param timescale_factor: The time scaling to be used in the simulation
        :param label: The label of this vertex
        :param constraints: Any initial constraints to this vertex
        :param board_address: The IP address of the board on which to place\
                this vertex if receiving data, either buffered or live (by\
                default, any board is chosen)
        :param receive_port: The port on the board that will listen for\
                incoming event packets (default is to disable this feature;\
                set a value to enable it)
        :param receive_sdp_port: The SDP port to listen on for incoming event\
                packets (defaults to 1)
        :param receive_tag: The IP tag to use for receiving live events\
                (uses any by default)
        :param virtual_key: The base multicast key to send received events\
                with (assigned automatically by default)
        :param prefix: The prefix to "or" with generated multicast keys\
                (default is no prefix)
        :param prefix_type: Whether the prefix should apply to the upper or\
                lower half of the multicast keys (default is upper half)
        :param check_keys: True if the keys of received events should be\
                verified before sending (default False)
        :param send_buffer_times: An array of arrays of times at which keys\
                should be sent (one array for each key, default disabled)
        :param send_buffer_max_space: The maximum amount of space to use of\
                the SDRAM on the machine (default is 1MB)
        :param send_buffer_space_before_notify: The amount of space free in\
                the sending buffer before the machine will ask the host for\
                more data (default setting is optimised for most cases)
        :param send_buffer_notification_ip_address: The IP address of the host\
                that will send new buffers (must be specified if a send buffer\
                is specified)
        :param send_buffer_notification_port: The port that the host that will\
                send new buffers is listening on (must be specified if a\
                send buffer is specified)
        :param send_buffer_notification_tag: The IP tag to use to notify the\
                host about space in the buffer (default is to use any tag)
        """

        # Set up super types
        PartitionedVertex.__init__(
            self, resources_required, label, constraints)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step, timescale_factor)
        ProvidesProvenanceDataFromMachineImpl.__init__(
            self, self._REGIONS.PROVENANCE_REGION.value, 0)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        ReceiveBuffersToHostBasicImpl.__init__(self)

        # Set up for receiving live packets
        if receive_port is not None:
            self.add_constraint(TagAllocatorRequireReverseIptagConstraint(
                receive_port, receive_sdp_port, board_address, receive_tag))

        # Work out if buffers are being sent
        self._first_machine_time_step = 0
        self._send_buffer = None
        if send_buffer_times is None:
            self._send_buffer_times = None
            SendsBuffersFromHostPreBufferedImpl.__init__(
                self, None)
        else:
            self._send_buffer = BufferedSendingRegion(send_buffer_max_space)
            self._send_buffer_times = send_buffer_times

            self.add_constraint(TagAllocatorRequireIptagConstraint(
                send_buffer_notification_ip_address,
                send_buffer_notification_port, True, board_address,
                send_buffer_notification_tag))
            SendsBuffersFromHostPreBufferedImpl.__init__(
                self, {self._REGIONS.SEND_BUFFER.value: self._send_buffer})

        # buffered out parameters
        self._send_buffer_space_before_notify = send_buffer_space_before_notify
        self._send_buffer_notification_ip_address = \
            send_buffer_notification_ip_address
        self._send_buffer_notification_port = send_buffer_notification_port
        self._send_buffer_notification_tag = send_buffer_notification_tag
        if self._send_buffer_space_before_notify > send_buffer_max_space:
            self._send_buffer_space_before_notify = send_buffer_max_space

        # Set up for recording (if requested)
        self._record_buffer_size = 0
        self._buffer_size_before_receive = 0

        # set flag for checking if in injection mode
        self._in_injection_mode = receive_port is not None

        # Sort out the keys to be used
        self._n_keys = n_keys
        self._virtual_key = virtual_key
        self._mask = None
        self._prefix = prefix
        self._prefix_type = prefix_type
        self._check_keys = check_keys

        # Work out the prefix details
        if self._prefix is not None:
            if self._prefix_type is None:
                self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
            if self._prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
                self._prefix = prefix << 16

        # If the user has specified a virtual key
        if self._virtual_key is not None:

            # check that virtual key is valid
            if self._virtual_key < 0:
                raise ConfigurationException(
                    "Virtual keys must be positive")

            # Get a mask and maximum number of keys for the number of keys
            # requested
            self._mask, max_key = self._calculate_mask(n_keys)

            # Check that the number of keys and the virtual key don't interfere
            if n_keys > max_key:
                raise ConfigurationException(
                    "The mask calculated from the number of keys will "
                    "not work with the virtual key specified")

            if self._prefix is not None:

                # Check that the prefix doesn't change the virtual key in the
                # masked area
                masked_key = (self._virtual_key | self._prefix) & self._mask
                if self._virtual_key != masked_key:
                    raise ConfigurationException(
                        "The number of keys, virtual key and key prefix"
                        " settings don't work together")
            else:

                # If no prefix was generated, generate one
                self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
                self._prefix = self._virtual_key
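
The `_calculate_mask` helper used above is not shown in this example; one plausible sketch of that kind of calculation (an assumption for illustration, not necessarily the library's implementation) rounds the key count up to a power of two and fixes the remaining high bits in the mask:

    def calculate_mask_sketch(n_keys):
        # number of low-order bits needed to address n_keys distinct keys
        n_bits = max(1, (n_keys - 1).bit_length())
        max_key = (1 << n_bits) - 1
        mask = 0xFFFFFFFF & ~max_key  # high bits fixed, low bits free for keys
        return mask, max_key

    mask, max_key = calculate_mask_sketch(100)
    print(hex(mask), max_key)  # 0xffffff80 127
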
Example #9
    def __init__(
            self, n_keys, resources_required, machine_time_step,
            timescale_factor, label, constraints=None,

            # General input and output parameters
            board_address=None,

            # Live input parameters
            receive_port=None,
            receive_sdp_port=(
                constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value),
            receive_tag=None,

            # Key parameters
            virtual_key=None, prefix=None,
            prefix_type=None, check_keys=False,

            # Send buffer parameters
            send_buffer_times=None,
            send_buffer_max_space=(
                constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP),
            send_buffer_space_before_notify=640,
            send_buffer_notification_ip_address=None,
            send_buffer_notification_port=None,
            send_buffer_notification_tag=None):
        """

        :param n_keys: The number of keys to be sent via this multicast source
        :param resources_required: The resources required by the vertex
        :param machine_time_step: The time step to be used on the machine
        :param timescale_factor: The time scaling to be used in the simulation
        :param label: The label of this vertex
        :param constraints: Any initial constraints to this vertex
        :param board_address: The IP address of the board on which to place\
                this vertex if receiving data, either buffered or live (by\
                default, any board is chosen)
        :param receive_port: The port on the board that will listen for\
                incoming event packets (default is to disable this feature;\
                set a value to enable it)
        :param receive_sdp_port: The SDP port to listen on for incoming event\
                packets (defaults to 1)
        :param receive_tag: The IP tag to use for receiving live events\
                (uses any by default)
        :param virtual_key: The base multicast key to send received events\
                with (assigned automatically by default)
        :param prefix: The prefix to "or" with generated multicast keys\
                (default is no prefix)
        :param prefix_type: Whether the prefix should apply to the upper or\
                lower half of the multicast keys (default is upper half)
        :param check_keys: True if the keys of received events should be\
                verified before sending (default False)
        :param send_buffer_times: An array of arrays of times at which keys\
                should be sent (one array for each key, default disabled)
        :param send_buffer_max_space: The maximum amount of space to use of\
                the SDRAM on the machine (default is 1MB)
        :param send_buffer_space_before_notify: The amount of space free in\
                the sending buffer before the machine will ask the host for\
                more data (default setting is optimised for most cases)
        :param send_buffer_notification_ip_address: The IP address of the host\
                that will send new buffers (must be specified if a send buffer\
                is specified)
        :param send_buffer_notification_port: The port that the host that will\
                send new buffers is listening on (must be specified if a\
                send buffer is specified)
        :param send_buffer_notification_tag: The IP tag to use to notify the\
                host about space in the buffer (default is to use any tag)
        """

        # Set up super types
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step, timescale_factor)
        PartitionedVertex.__init__(
            self, resources_required, label, constraints)
        AbstractProvidesOutgoingEdgeConstraints.__init__(self)
        ReceiveBuffersToHostBasicImpl.__init__(self)

        # Set up for receiving live packets
        if receive_port is not None:
            self.add_constraint(TagAllocatorRequireReverseIptagConstraint(
                receive_port, receive_sdp_port, board_address, receive_tag))

        # Work out if buffers are being sent
        self._first_machine_time_step = 0
        self._send_buffer = None
        if send_buffer_times is None:
            self._send_buffer_times = None
            SendsBuffersFromHostPartitionedVertexPreBufferedImpl.__init__(
                self, None)
        else:
            self._send_buffer = BufferedSendingRegion(send_buffer_max_space)
            self._send_buffer_times = send_buffer_times

            self.add_constraint(TagAllocatorRequireIptagConstraint(
                send_buffer_notification_ip_address,
                send_buffer_notification_port, True, board_address,
                send_buffer_notification_tag))
            SendsBuffersFromHostPartitionedVertexPreBufferedImpl.__init__(
                self, {self._REGIONS.SEND_BUFFER.value: self._send_buffer})

        # buffered out parameters
        self._send_buffer_space_before_notify = send_buffer_space_before_notify
        self._send_buffer_notification_ip_address = \
            send_buffer_notification_ip_address
        self._send_buffer_notification_port = send_buffer_notification_port
        self._send_buffer_notification_tag = send_buffer_notification_tag
        if self._send_buffer_space_before_notify > send_buffer_max_space:
            self._send_buffer_space_before_notify = send_buffer_max_space

        # Set up for recording (if requested)
        self._record_buffer_size = 0
        self._buffer_size_before_receive = 0

        # Sort out the keys to be used
        self._n_keys = n_keys
        self._virtual_key = virtual_key
        self._mask = None
        self._prefix = prefix
        self._prefix_type = prefix_type
        self._check_keys = check_keys

        # Work out the prefix details
        if self._prefix is not None:
            if self._prefix_type is None:
                self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
            if self._prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
                self._prefix = prefix << 16

        # If the user has specified a virtual key
        if self._virtual_key is not None:

            # check that virtual key is valid
            if self._virtual_key < 0:
                raise ConfigurationException(
                    "Virtual keys must be positive")

            # Get a mask and maximum number of keys for the number of keys
            # requested
            self._mask, max_key = self._calculate_mask(n_keys)

            # Check that the number of keys and the virtual key don't interfere
            if n_keys > max_key:
                raise ConfigurationException(
                    "The mask calculated from the number of keys will "
                    "not work with the virtual key specified")

            if self._prefix is not None:

                # Check that the prefix doesn't change the virtual key in the
                # masked area
                masked_key = (self._virtual_key | self._prefix) & self._mask
                if self._virtual_key != masked_key:
                    raise ConfigurationException(
                        "The number of keys, virtual key and key prefix"
                        " settings don't work together")
            else:

                # If no prefix was generated, generate one
                self._prefix_type = EIEIOPrefix.UPPER_HALF_WORD
                self._prefix = self._virtual_key
Example #10
    def __init__(
            self, n_neurons, binary, label, max_atoms_per_core,
            machine_time_step, timescale_factor, spikes_per_second,
            ring_buffer_sigma, model_name, neuron_model, input_type,
            synapse_type, threshold_type, additional_input=None,
            constraints=None):

        ReceiveBuffersToHostBasicImpl.__init__(self)
        AbstractPartitionableVertex.__init__(
            self, n_neurons, label, max_atoms_per_core, constraints)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step, timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractVRecordable.__init__(self)
        AbstractGSynRecordable.__init__(self)
        AbstractProvidesOutgoingEdgeConstraints.__init__(self)
        AbstractProvidesIncomingEdgeConstraints.__init__(self)
        AbstractPopulationInitializable.__init__(self)
        AbstractPopulationSettable.__init__(self)
        AbstractMappable.__init__(self)

        self._binary = binary
        self._label = label
        self._machine_time_step = machine_time_step
        self._timescale_factor = timescale_factor

        self._model_name = model_name
        self._neuron_model = neuron_model
        self._input_type = input_type
        self._threshold_type = threshold_type
        self._additional_input = additional_input

        # Set up for recording
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._v_recorder = VRecorder(machine_time_step)
        self._gsyn_recorder = GsynRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._v_buffer_max_size = config.getint(
            "Buffers", "v_buffer_size")
        self._gsyn_buffer_max_size = config.getint(
            "Buffers", "gsyn_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")

        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            synapse_type, machine_time_step, ring_buffer_sigma,
            spikes_per_second)

        # Get buffering information for later use
        self._receive_buffer_host = config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = config.getint(
            "Buffers", "receive_buffer_port")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")

        # bool for if state has changed.
        self._change_requires_mapping = True
Example #11
    def __init__(
            self,
            n_neurons,
            machine_time_step,
            timescale_factor,
            spikes_per_second=None,
            ring_buffer_sigma=None,
            incoming_spike_buffer_size=None,
            constraints=None,
            label=None,

            # neuron model parameters
            primary=default_parameters['primary'],

            # threshold types parameters
            v_thresh=default_parameters['v_thresh'],

            # initial values for the state values
            v_init=None,
            receive_port=None,
            receive_tag=None,
            board_address=None):

        # create your neuron model class
        neuron_model = SpindleModel(n_neurons, machine_time_step, primary)

        # create your synapse type model
        synapse_type = FusimotorActivation(
            n_neurons, machine_time_step,
            MuscleSpindle.default_parameters['a_syn_D'],
            MuscleSpindle.default_parameters['tau_syn_D'],
            MuscleSpindle.default_parameters['a_syn_S'],
            MuscleSpindle.default_parameters['tau_syn_S'])

        # create your input type model
        input_type = InputTypeCurrent()

        # create your threshold type model
        threshold_type = ThresholdTypeStatic(n_neurons, v_thresh)

        # create your own additional inputs
        additional_input = None

        # instantiate the sPyNNaker system by initialising
        #  the AbstractPopulationVertex
        AbstractPopulationVertex.__init__(

            # standard inputs, do not need to change.
            self,
            n_neurons=n_neurons,
            label=label,
            machine_time_step=machine_time_step,
            timescale_factor=timescale_factor,
            spikes_per_second=spikes_per_second,
            ring_buffer_sigma=ring_buffer_sigma,
            incoming_spike_buffer_size=incoming_spike_buffer_size,

            # max units per core
            max_atoms_per_core=MuscleSpindle._model_based_max_atoms_per_core,

            # These are the various model types
            neuron_model=neuron_model,
            input_type=input_type,
            synapse_type=synapse_type,
            threshold_type=threshold_type,
            additional_input=additional_input,

            # model name (shown in reports)
            model_name="MuscleSpindle",

            # matching binary name
            binary="muscle_spindle.aplx")

        ReceiveBuffersToHostBasicImpl.__init__(self)

        self.add_constraint(
            TagAllocatorRequireReverseIptagConstraint(
                receive_port,
                constants.SDP_PORTS.INPUT_BUFFERING_SDP_PORT.value,
                board_address, receive_tag))
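
Assuming the MuscleSpindle model in Example #11 is importable as a sPyNNaker/PyNN cell type (the import path and parameter values below are assumptions for illustration only), it would typically be driven through the standard PyNN population interface:

    # hypothetical usage sketch; module path and parameter values are assumed
    import spynnaker.pyNN as p
    # from python_models.muscle_spindle import MuscleSpindle

    p.setup(timestep=1.0)
    spindles = p.Population(
        100, MuscleSpindle,
        {"primary": True, "v_thresh": 10.0, "receive_port": 12345},
        label="spindles")
    p.run(1000)
    p.end()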