Code example #1
def convert_param_to_numpy(param, no_atoms):
    """ Convert parameters into numpy arrays

    :param param: the param to convert
    :param no_atoms: the number of atoms available for conversion of param
    :return numpy.array: the converted param in whatever format it was given
    """

    # Deal with random distributions by generating values
    if globals_variables.get_simulator().is_a_pynn_random(param):
        if no_atoms > 1:
            return numpy.asarray(param.next(n=no_atoms), dtype="float")

        # numpy reduces a single valued array to a single value, so enforce
        # that it is an array
        return numpy.array([param.next(n=no_atoms)], dtype="float")

    # Deal with a single value by exploding to multiple values
    if not hasattr(param, '__iter__'):
        return numpy.array([param] * no_atoms, dtype="float")

    # Deal with multiple values, but not the correct number of them
    if len(param) != no_atoms:
        raise exceptions.ConfigurationException(
            "The number of params does not equal the number of atoms in"
            " the vertex")

    # Deal with the correct number of multiple values
    return numpy.array(param, dtype="float")
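
For reference, a minimal standalone sketch of the scalar and list branches above, using plain numpy only (the values are hypothetical; the random branch additionally needs a configured PyNN simulator):

import numpy

def demo_convert(param, no_atoms):
    # mirrors the scalar and list branches of convert_param_to_numpy
    if not hasattr(param, '__iter__'):
        return numpy.array([param] * no_atoms, dtype="float")
    return numpy.array(param, dtype="float")

print(demo_convert(5.0, 3))        # [5. 5. 5.] - a scalar is exploded to all atoms
print(demo_convert([1, 2, 3], 3))  # [1. 2. 3.] - a list is converted to floats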
Code example #2
    def update_weight(self, graph_mapper):
        pre_vertex = graph_mapper.get_application_vertex(self.pre_vertex)
        pre_slice_index = graph_mapper.get_machine_vertex_index(
            self.pre_vertex)
        pre_vertex_slice = graph_mapper.get_slice(self.pre_vertex)
        pre_slices = graph_mapper.get_slices(pre_vertex)
        post_vertex = graph_mapper.get_application_vertex(self.post_vertex)
        post_slice_index = graph_mapper.get_machine_vertex_index(
            self.post_vertex)
        post_vertex_slice = graph_mapper.get_slice(self.post_vertex)
        post_slices = graph_mapper.get_slices(post_vertex)

        weight = 0
        for synapse_info in self._synapse_information:
            new_weight = synapse_info.connector.\
                get_n_connections_to_post_vertex_maximum(
                    pre_slices, pre_slice_index, post_slices,
                    post_slice_index, pre_vertex_slice, post_vertex_slice)
            new_weight *= pre_vertex_slice.n_atoms
            if hasattr(pre_vertex, "rate"):
                rate = pre_vertex.rate
                if hasattr(rate, "__getitem__"):
                    rate = max(rate)
                elif globals_variables.get_simulator().is_a_pynn_random(rate):
                    rate = utility_calls.get_maximum_probable_value(
                        rate, pre_vertex_slice.n_atoms)
                new_weight *= rate
            elif hasattr(pre_vertex, "spikes_per_second"):
                new_weight *= pre_vertex.spikes_per_second
            weight += new_weight

        self._traffic_weight = weight
Code example #3
    def _get_n_connections_from_pre_vertex_with_delay_maximum(
            delays, n_total_connections, n_connections, connection_slices,
            min_delay, max_delay):
        """ Get the expected number of delays that will fall within min_delay\
            and max_delay given a float, RandomDistribution or list of\
            delays
        """
        if globals_variables.get_simulator().is_a_pynn_random(delays):
            prob_in_range = utility_calls.get_probability_within_range(
                delays, min_delay, max_delay)
            return int(
                math.ceil(
                    utility_calls.get_probable_maximum_selected(
                        n_total_connections, n_connections, prob_in_range)))
        elif numpy.isscalar(delays):
            if min_delay <= delays <= max_delay:
                return int(math.ceil(n_connections))
            return 0
        elif hasattr(delays, "__getitem__"):
            n_delayed = sum([
                len([
                    delay for delay in delays[connection_slice]
                    if min_delay <= delay <= max_delay
                ]) for connection_slice in connection_slices
            ])
            n_total = sum([
                len(delays[connection_slice])
                for connection_slice in connection_slices
            ])
            prob_delayed = float(n_delayed) / float(n_total)
            return int(
                math.ceil(
                    utility_calls.get_probable_maximum_selected(
                        n_total_connections, n_delayed, prob_delayed)))
        raise Exception("Unrecognised delay format")
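
A self-contained sketch of the list branch, with hypothetical delays and connection slices (the random branch needs the simulator's stats utilities):

import numpy

delays = numpy.array([1.0, 2.0, 5.0, 7.0, 3.0, 9.0])
connection_slices = [slice(0, 3), slice(3, 6)]
min_delay, max_delay = 2.0, 7.0

# count the delays in each slice that fall within [min_delay, max_delay]
n_delayed = sum(
    len([d for d in delays[s] if min_delay <= d <= max_delay])
    for s in connection_slices)
print(n_delayed)  # 4 (the delays 2.0, 5.0, 7.0 and 3.0 are in range)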
Code example #4
    def _get_weight_maximum(weights, n_connections, connection_slices):
        """ Get the maximum of the weights
        """
        if globals_variables.get_simulator().is_a_pynn_random(weights):
            mean_weight = utility_calls.get_mean(weights)
            if mean_weight < 0:
                min_weight = utility_calls.get_minimum_probable_value(
                    weights, n_connections)
                if weights.boundaries is not None:
                    return abs(max(min_weight, min(weights.boundaries)))
                return abs(min_weight)
            else:
                max_weight = utility_calls.get_maximum_probable_value(
                    weights, n_connections)
                if weights.boundaries is not None:
                    return abs(min(max_weight, max(weights.boundaries)))
                return abs(max_weight)

        elif numpy.isscalar(weights):
            return abs(weights)
        elif hasattr(weights, "__getitem__"):
            return numpy.amax([
                numpy.abs(weights[connection_slice])
                for connection_slice in connection_slices
            ])
        raise Exception("Unrecognised weight format")
Code example #5
    def _generate_values(self, values, n_connections, connection_slices):
        if globals_variables.get_simulator().is_a_pynn_random(values):
            if n_connections == 1:
                return numpy.array([values.next(n_connections)])
            return values.next(n_connections)
        elif numpy.isscalar(values):
            return numpy.repeat([values], n_connections)
        elif hasattr(values, "__getitem__"):
            return numpy.concatenate([
                values[connection_slice]
                for connection_slice in connection_slices
            ])
        elif isinstance(values, basestring) or callable(values):
            if self._space is None:
                raise Exception(
                    "No space object specified in projection {}-{}".format(
                        self._pre_population, self._post_population))

            expand_distances = True
            if isinstance(values, basestring):
                expand_distances = self._expand_distances(values)

            d = self._space.distances(self._pre_population.positions,
                                      self._post_population.positions,
                                      expand_distances)

            if isinstance(values, basestring):
                return eval(values)
            return values(d)
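
The string and callable branches can be illustrated standalone; here d is a hypothetical distance array standing in for the matrix that self._space.distances() would return:

import numpy

d = numpy.array([0.5, 1.0, 2.0])

# a string expression in d, evaluated the same way as the eval(values) branch
expression = "0.1 * numpy.exp(-d)"
print(eval(expression))  # [0.06065307 0.03678794 0.01353353]

# the equivalent callable form
print((lambda d: 0.1 * numpy.exp(-d))(d))  # same values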
Code example #6
def get_probability_within_range(dist, lower, upper):
    """ Get the probability that a value will fall within the given range for\
        a given RandomDistribution
    """
    simulator = globals_variables.get_simulator()
    stats = simulator.get_distribution_to_stats()[dist.name]
    return (stats.cdf(dist, upper) - stats.cdf(dist, lower))
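
The same computation can be sketched with scipy.stats directly; a normal distribution stands in here for whatever the simulator's distribution-to-stats mapping would return, and the parameters are hypothetical:

import scipy.stats

dist = scipy.stats.norm(loc=5.0, scale=2.0)
lower, upper = 3.0, 7.0
print(dist.cdf(upper) - dist.cdf(lower))  # ~0.6827, the one-sigma band of N(5, 2)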
Code example #7
def get_minimum_probable_value(dist, n_items, chance=(1.0 / 100.0)):
    """ Get the likely minimum value of a RandomDistribution given a\
        number of draws
    """
    simulator = globals_variables.get_simulator()
    stats = simulator.get_distribution_to_stats()[dist.name]
    prob = chance / float(n_items)
    return stats.ppf(dist, prob)
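
A standalone analogue with scipy.stats, assuming a standard normal distribution: with n_items draws and a 1-in-100 overall chance, the per-draw tail probability is chance / n_items, and the ppf of that probability gives the likely minimum:

import scipy.stats

n_items, chance = 1000, 1.0 / 100.0
prob = chance / float(n_items)     # 1e-5 per-draw tail probability
print(scipy.stats.norm.ppf(prob))  # ~-4.26, the likely minimum of 1000 draws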
Code example #8
    def _get_gsyn_inhibitory(self):
        """ get the gsyn inhibitory values from the vertex

        :return: the gsyn inhibitory values
        """
        if isinstance(self._population._vertex,
                      AbstractGSynInhibitoryRecordable):
            if not self._population._vertex.is_recording_gsyn_inhibitory():
                raise fec_excceptions.ConfigurationException(
                    "This population has not been set to record gsyn "
                    "inhibitory")
        else:
            raise fec_excceptions.ConfigurationException(
                "This population has not got the capability to record gsyn "
                "inhibitory")

        if not globals_variables.get_simulator().has_ran:
            logger.warn(
                "The simulation has not yet run, therefore gsyn inhibitory "
                "cannot be retrieved, hence the list will be empty")
            return numpy.zeros((0, 4))

        if globals_variables.get_simulator().use_virtual_board:
            logger.warn(
                "The simulation is using a virtual machine and so has not"
                " truly ran, hence the list will be empty")
            return numpy.zeros((0, 4))

        return self._population._vertex.get_gsyn_inhibitory(
            globals_variables.get_simulator().no_machine_time_steps,
            globals_variables.get_simulator().placements,
            globals_variables.get_simulator().graph_mapper,
            globals_variables.get_simulator().buffer_manager,
            globals_variables.get_simulator().machine_time_step)
Code example #9
    def _get_v(self):
        """ get the voltage from the vertex

        :return: the voltages
        """

        # check that we're in a state to get voltages
        if isinstance(self._population._vertex, AbstractVRecordable):
            if not self._population._vertex.is_recording_v():
                raise fec_excceptions.ConfigurationException(
                    "This population has not been set to record v")
        else:
            raise fec_excceptions.ConfigurationException(
                "This population has not got the capability to record v")

        if not globals_variables.get_simulator().has_ran:
            logger.warn("The simulation has not yet run, therefore v cannot"
                        " be retrieved, hence the list will be empty")
            return numpy.zeros((0, 3))

        if globals_variables.get_simulator().use_virtual_board:
            logger.warn(
                "The simulation is using a virtual machine and so has not"
                " truly ran, hence the list will be empty")
            return numpy.zeros((0, 3))

        # assuming we got here, everything is ok, so we should go get the
        # voltages
        return self._population._vertex.get_v(
            globals_variables.get_simulator().no_machine_time_steps,
            globals_variables.get_simulator().placements,
            globals_variables.get_simulator().graph_mapper,
            globals_variables.get_simulator().buffer_manager,
            globals_variables.get_simulator().machine_time_step)
Code example #10
    def __init__(self,
                 n_neurons,
                 constraints=none_pynn_default_parameters['constraints'],
                 label=none_pynn_default_parameters['label'],
                 rate=default_parameters['rate'],
                 start=default_parameters['start'],
                 duration=default_parameters['duration'],
                 seed=none_pynn_default_parameters['seed']):
        ApplicationVertex.__init__(self, label, constraints,
                                   self._model_based_max_atoms_per_core)
        AbstractSpikeRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        SimplePopulationSettable.__init__(self)
        ProvidesKeyToAtomMappingImpl.__init__(self)

        config = globals_variables.get_simulator().config

        # atoms params
        self._n_atoms = n_neurons
        self._seed = None

        # flags for tracking parameter changes
        self._change_requires_mapping = True
        self._change_requires_neuron_parameters_reload = False

        # Store the parameters
        self._rate = utility_calls.convert_param_to_numpy(rate, n_neurons)
        self._start = utility_calls.convert_param_to_numpy(start, n_neurons)
        self._duration = utility_calls.convert_param_to_numpy(
            duration, n_neurons)
        self._time_to_spike = utility_calls.convert_param_to_numpy(
            0, n_neurons)
        self._rng = numpy.random.RandomState(seed)
        self._machine_time_step = None

        # Prepare for recording, and to get spikes
        self._spike_recorder = MultiSpikeRecorder()
        self._time_between_requests = config.getint("Buffers",
                                                    "time_between_requests")
        self._receive_buffer_host = config.get("Buffers",
                                               "receive_buffer_host")
        self._receive_buffer_port = helpful_functions.read_config_int(
            config, "Buffers", "receive_buffer_port")
        self._minimum_buffer_sdram = config.getint("Buffers",
                                                   "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")

        spike_buffer_max_size = 0
        self._buffer_size_before_receive = None
        if config.getboolean("Buffers", "enable_buffered_recording"):
            spike_buffer_max_size = config.getint("Buffers",
                                                  "spike_buffer_size")
            self._buffer_size_before_receive = config.getint(
                "Buffers", "buffer_size_before_receive")
        self._maximum_sdram_for_buffering = [spike_buffer_max_size]
Code example #11
    def set_projection_information(self, pre_population, post_population, rng,
                                   machine_time_step):
        self._pre_population = pre_population
        self._post_population = post_population
        self._n_pre_neurons = pre_population.size
        self._n_post_neurons = post_population.size
        self._rng = rng
        if self._rng is None:
            self._rng = globals_variables.get_simulator().get_pynn_NumpyRNG()
        self._min_delay = machine_time_step / 1000.0
Code example #12
    def _check_parameter(self, values, name, allow_lists):
        """ Check that the type of the values is supported
        """
        if (not numpy.isscalar(values) and not (
                globals_variables.get_simulator().is_a_pynn_random(values))
                and not hasattr(values, "__getitem__")):
            raise Exception("Parameter {} format unsupported".format(name))
        if not allow_lists and hasattr(values, "__getitem__"):
            raise NotImplementedError(
                "Lists of {} are not supported by the implementation of"
                " {} on this platform".format(name, self.__class__))
Code example #13
    def _read_parameters_before_set(self):
        """ Reads parameters from the machine before "set" completes

        :return: None
        """

        # If the tools have run before, and not reset, and the read
        # hasn't already been done, read back the data
        if (globals_variables.get_simulator().has_ran
                and not globals_variables.get_simulator().has_reset_last
                and isinstance(self._vertex, AbstractReadParametersBeforeSet)
                and not self._has_read_neuron_parameters_this_run):

            # locate machine vertices from the application vertices
            machine_vertices = globals_variables.get_simulator().graph_mapper\
                .get_machine_vertices(self._vertex)

            # go through each machine vertex and read the neuron parameters
            # it contains
            for machine_vertex in machine_vertices:

                # tell the core to rewrite neuron params back to the
                # sdram space.
                placement = globals_variables.get_simulator().placements.\
                    get_placement_of_vertex(machine_vertex)

                self._vertex.read_parameters_from_machine(
                    globals_variables.get_simulator().transceiver, placement,
                    globals_variables.get_simulator().graph_mapper.get_slice(
                        machine_vertex))

            self._has_read_neuron_parameters_this_run = True
Code example #14
    def _get_delay_variance(delays, connection_slices):
        """ Get the variance of the delays
        """
        if globals_variables.get_simulator().is_a_pynn_random(delays):
            return utility_calls.get_variance(delays)
        elif numpy.isscalar(delays):
            return 0.0
        elif hasattr(delays, "__getitem__"):
            return numpy.var([
                delays[connection_slice]
                for connection_slice in connection_slices
            ])
        raise Exception("Unrecognised delay format")
Code example #15
    def _get_weight_variance(weights, connection_slices):
        """ Get the variance of the weights
        """
        if globals_variables.get_simulator().is_a_pynn_random(weights):
            return utility_calls.get_variance(weights)
        elif numpy.isscalar(weights):
            return 0.0
        elif hasattr(weights, "__getitem__"):
            return numpy.var([
                numpy.abs(weights[connection_slice])
                for connection_slice in connection_slices
            ])
        raise Exception("Unrecognised weight format")
Code example #16
    def create_label(model_label, pop_level_label):
        """ helper method for choosing a label from model and population levels

        :param model_label: the model level label
        :param pop_level_label: the pop level label
        :return: the new model level label
        """
        cell_label = None
        if model_label is None and pop_level_label is None:
            cell_label = "Population {}".format(
                globals_variables.get_simulator().none_labelled_vertex_count)
            globals_variables.get_simulator(). \
                increment_none_labelled_vertex_count()
        elif model_label is None and pop_level_label is not None:
            cell_label = pop_level_label
        elif model_label is not None and pop_level_label is None:
            cell_label = model_label
        elif model_label is not None and pop_level_label is not None:
            cell_label = pop_level_label
            logger.warn("Don't know which label to use. Will use pop "
                        "label and carry on")
        return cell_label
Code example #17
    def initialize(self, variable, value):
        """ Set the initial value of one of the state variables of the neurons\
            in this population.

        """
        if not isinstance(self._vertex, AbstractPopulationInitializable):
            raise KeyError(
                "Population does not support the initialisation of {}".format(
                    variable))
        if globals_variables.get_simulator().has_ran and not isinstance(
                self._vertex, AbstractChangableAfterRun):
            raise Exception("Population does not support changes after run")
        self._vertex.initialize(
            variable,
            utility_calls.convert_param_to_numpy(value, self._vertex.n_atoms))
Code example #18
    def _generate_lists_on_host(self, values):
        """ Checks if the connector should generate lists on host rather than\
            trying to generate the connectivity data on the machine, based on\
            the types of the weights and/or delays
        """

        # Scalars are fine on the machine
        if numpy.isscalar(values):
            return True

        # Only certain types of random distributions are supported for
        # generation on the machine
        if globals_variables.get_simulator().is_a_pynn_random(values):
            return values.name in ("uniform", "uniform_int", "poisson",
                                   "normal", "exponential")

        return False
Code example #19
    def _get_delay_maximum(delays, n_connections):
        """ Get the maximum delay given a float, RandomDistribution or list of\
            delays
        """
        if globals_variables.get_simulator().is_a_pynn_random(delays):
            max_estimated_delay = utility_calls.get_maximum_probable_value(
                delays, n_connections)
            if hasattr(delays, "boundaries"):
                if delays.boundaries is not None:
                    return min(max(delays.boundaries), max_estimated_delay)
            elif isinstance(delays.parameters, dict):
                if "max" in delays.parameters:
                    return delays.parameters['max']

            return max_estimated_delay
        elif numpy.isscalar(delays):
            return delays
        elif hasattr(delays, "__getitem__"):
            return max(delays)
        raise Exception("Unrecognised delay format")
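
For the random branch, utility_calls.get_maximum_probable_value presumably mirrors the pattern of get_minimum_probable_value in Code example #7 but takes the upper tail; a scipy sketch of that idea with a standard normal and a hypothetical n_connections:

import scipy.stats

n_connections, chance = 1000, 1.0 / 100.0
prob = 1.0 - (chance / float(n_connections))
print(scipy.stats.norm.ppf(prob))  # ~4.26, the likely maximum of 1000 draws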
Code example #20
    def set(self, parameter, value=None):
        """ Set one or more parameters for every cell in the population.

        parameter can be a dict, in which case value should not be supplied,
        or a string giving the parameter name, in which case value is the
        parameter value. value can be a numeric value, or a list of such
        values (e.g. for setting spike times)::

          p.set("tau_m", 20.0)
          p.set({'tau_m': 20, 'v_rest': -65})

        :param parameter: the parameter to set
        :param value: the value of the parameter to set.
        """
        if not isinstance(self._vertex, AbstractPopulationSettable):
            raise KeyError(
                "Population does not have property {}".format(parameter))

        if globals_variables.get_simulator().has_ran and not isinstance(
                self._vertex, AbstractChangableAfterRun):
            raise Exception(
                "This population does not support changes to settings after"
                " run has been called")

        if type(parameter) is str:
            if value is None:
                raise Exception("A value (not None) must be specified")
            self._read_parameters_before_set()
            self._vertex.set_value(parameter, value)
            return

        if type(parameter) is not dict:
            raise Exception(
                "Parameter must either be the name of a single parameter to"
                " set, or a dict of parameter: value items to set")

        # set new parameters
        self._read_parameters_before_set()
        for (key, value) in parameter.iteritems():
            self._vertex.set_value(key, value)
Code example #21
def get_standard_deviation(dist):
    """ Get the standard deviation of a RandomDistribution
    """
    simulator = globals_variables.get_simulator()
    stats = simulator.get_distribution_to_stats()[dist.name]
    return stats.std(dist)
Code example #22
    def test_globals_variable(self):
        sim = globals_variables.get_simulator()
        self.assertTrue(isinstance(sim, FailedState))
Code example #23
    def __init__(self,
                 n_neurons,
                 spike_times=default_parameters['spike_times'],
                 port=none_pynn_default_parameters['port'],
                 tag=none_pynn_default_parameters['tag'],
                 ip_address=none_pynn_default_parameters['ip_address'],
                 board_address=none_pynn_default_parameters['board_address'],
                 max_on_chip_memory_usage_for_spikes_in_bytes=DEFAULT1,
                 space_before_notification=none_pynn_default_parameters[
                     'space_before_notification'],
                 constraints=none_pynn_default_parameters['constraints'],
                 label=none_pynn_default_parameters['label'],
                 spike_recorder_buffer_size=none_pynn_default_parameters[
                     'spike_recorder_buffer_size'],
                 buffer_size_before_receive=none_pynn_default_parameters[
                     'buffer_size_before_receive']):

        config = globals_variables.get_simulator().config
        self._ip_address = ip_address
        if ip_address is None:
            self._ip_address = config.get("Buffers", "receive_buffer_host")
        self._port = port
        if port is None:
            self._port = helpful_functions.read_config_int(
                config, "Buffers", "receive_buffer_port")
        if spike_times is None:
            spike_times = []

        ReverseIpTagMultiCastSource.__init__(
            self,
            n_keys=n_neurons,
            label=label,
            constraints=constraints,
            max_atoms_per_core=(
                SpikeSourceArray._model_based_max_atoms_per_core),
            board_address=board_address,
            receive_port=None,
            receive_tag=None,
            virtual_key=None,
            prefix=None,
            prefix_type=None,
            check_keys=False,
            send_buffer_times=spike_times,
            send_buffer_partition_id=constants.SPIKE_PARTITION_ID,
            send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
            send_buffer_space_before_notify=space_before_notification,
            buffer_notification_ip_address=self._ip_address,
            buffer_notification_port=self._port,
            buffer_notification_tag=tag)

        AbstractSpikeRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        ProvidesKeyToAtomMappingImpl.__init__(self)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder()
        self._spike_recorder_buffer_size = spike_recorder_buffer_size
        self._buffer_size_before_receive = buffer_size_before_receive

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
        self._spike_recording_region_size = None
        self._machine_vertices = list()

        # used for reset and rerun
        self._requires_mapping = True
        self._last_runtime_position = 0

        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification
        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = \
                front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise exceptions.ConfigurationException(
                "The requested on-chip memory usage for spikes is either"
                " beyond what the SpiNNaker board can support, or is a"
                " negative value. Please correct it and try again")

        if (self._max_on_chip_memory_usage_for_spikes <
                self._space_before_notification):
            self._space_before_notification =\
                self._max_on_chip_memory_usage_for_spikes
Code example #24
    def _get_ring_buffer_to_input_left_shifts(self, machine_vertex,
                                              machine_graph, graph_mapper,
                                              post_slices, post_slice_index,
                                              post_vertex_slice,
                                              machine_timestep, weight_scale):
        """ Get the scaling of the ring buffer to provide as much accuracy as\
            possible without too much overflow
        """
        weight_scale_squared = weight_scale * weight_scale
        n_synapse_types = self._synapse_type.get_n_synapse_types()
        running_totals = [RunningStats() for _ in range(n_synapse_types)]
        delay_running_totals = [RunningStats() for _ in range(n_synapse_types)]
        total_weights = numpy.zeros(n_synapse_types)
        biggest_weight = numpy.zeros(n_synapse_types)
        weights_signed = False
        rate_stats = [RunningStats() for _ in range(n_synapse_types)]

        for machine_edge in \
                machine_graph.get_edges_ending_at_vertex(machine_vertex):

            pre_vertex_slice = graph_mapper.get_slice(machine_edge.pre_vertex)
            app_edge = graph_mapper.get_application_edge(machine_edge)
            pre_slices = [
                graph_mapper.get_slice(internal_machine_vertex)
                for internal_machine_vertex in
                graph_mapper.get_machine_vertices(app_edge.pre_vertex)
            ]
            pre_slice_index = pre_slices.index(pre_vertex_slice)
            if isinstance(app_edge, ProjectionApplicationEdge):
                for synapse_info in app_edge.synapse_information:
                    synapse_type = synapse_info.synapse_type
                    synapse_dynamics = synapse_info.synapse_dynamics
                    connector = synapse_info.connector
                    weight_mean = abs(
                        synapse_dynamics.get_weight_mean(
                            connector, pre_slices, pre_slice_index,
                            post_slices, post_slice_index, pre_vertex_slice,
                            post_vertex_slice) * weight_scale)
                    n_connections = \
                        connector.get_n_connections_to_post_vertex_maximum(
                            pre_slices, pre_slice_index, post_slices,
                            post_slice_index, pre_vertex_slice,
                            post_vertex_slice)
                    weight_variance = abs(
                        synapse_dynamics.get_weight_variance(
                            connector, pre_slices, pre_slice_index,
                            post_slices, post_slice_index, pre_vertex_slice,
                            post_vertex_slice) * weight_scale_squared)
                    running_totals[synapse_type].add_items(
                        weight_mean, weight_variance, n_connections)

                    delay_variance = synapse_dynamics.get_delay_variance(
                        connector, pre_slices, pre_slice_index, post_slices,
                        post_slice_index, pre_vertex_slice, post_vertex_slice)
                    delay_running_totals[synapse_type].add_items(
                        0.0, delay_variance, n_connections)

                    weight_max = (synapse_dynamics.get_weight_maximum(
                        connector, pre_slices, pre_slice_index, post_slices,
                        post_slice_index, pre_vertex_slice, post_vertex_slice)
                                  * weight_scale)
                    biggest_weight[synapse_type] = max(
                        biggest_weight[synapse_type], weight_max)

                    spikes_per_tick = max(
                        1.0, self._spikes_per_second /
                        (1000000.0 / float(machine_timestep)))
                    spikes_per_second = self._spikes_per_second
                    if isinstance(app_edge.pre_vertex, SpikeSourcePoisson):
                        spikes_per_second = app_edge.pre_vertex.rate
                        if hasattr(spikes_per_second, "__getitem__"):
                            spikes_per_second = max(spikes_per_second)
                        elif globals_variables.get_simulator().\
                                is_a_pynn_random(spikes_per_second):
                            spikes_per_second = \
                                utility_calls.get_maximum_probable_value(
                                    spikes_per_second,
                                    pre_vertex_slice.n_atoms)
                        prob = 1.0 - ((1.0 / 100.0) / pre_vertex_slice.n_atoms)
                        spikes_per_tick = (
                            spikes_per_second /
                            (1000000.0 / float(machine_timestep)))
                        spikes_per_tick = scipy.stats.poisson.ppf(
                            prob, spikes_per_tick)
                    rate_stats[synapse_type].add_items(spikes_per_second, 0,
                                                       n_connections)
                    total_weights[synapse_type] += spikes_per_tick * (
                        weight_max * n_connections)

                    if synapse_dynamics.are_weights_signed():
                        weights_signed = True

        max_weights = numpy.zeros(n_synapse_types)
        for synapse_type in range(n_synapse_types):
            stats = running_totals[synapse_type]
            rates = rate_stats[synapse_type]
            if delay_running_totals[synapse_type].variance == 0.0:
                max_weights[synapse_type] = total_weights[synapse_type]
            else:
                max_weights[synapse_type] = min(
                    self._ring_buffer_expected_upper_bound(
                        stats.mean, stats.standard_deviation, rates.mean,
                        machine_timestep, stats.n_items,
                        self._ring_buffer_sigma), total_weights[synapse_type])
                max_weights[synapse_type] = max(max_weights[synapse_type],
                                                biggest_weight[synapse_type])

        # Convert these to powers
        max_weight_powers = [
            0 if w <= 0 else int(math.ceil(max(0, math.log(w, 2))))
            for w in max_weights
        ]

        # If 2^max_weight_power equals the max weight, we have to add another
        # power, as range is 0 - (just under 2^max_weight_power)!
        max_weight_powers = [
            w + 1 if (2**w) <= a else w
            for w, a in zip(max_weight_powers, max_weights)
        ]

        # If we have synapse dynamics that uses signed weights,
        # Add another bit of shift to prevent overflows
        if weights_signed:
            max_weight_powers = [m + 1 for m in max_weight_powers]

        return max_weight_powers
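
The final power-of-two conversion can be checked in isolation; the max_weights below are hypothetical ring-buffer maxima:

import math

max_weights = [0.5, 1.0, 3.7, 16.0]
powers = [0 if w <= 0 else int(math.ceil(max(0, math.log(w, 2))))
          for w in max_weights]
# add one more power where 2**power only just covers the maximum weight
powers = [p + 1 if (2 ** p) <= w else p for p, w in zip(powers, max_weights)]
print(powers)  # [0, 1, 2, 5]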
Code example #25
def get_variance(dist):
    """ Get the variance of a RandomDistribution
    """
    simulator = globals_variables.get_simulator()
    stats = simulator.get_distribution_to_stats()[dist.name]
    return stats.var(dist)