Example #1
    def get_max_rewires_per_ts(self):
        max_rewires_per_ts = 1
        if (self.p_rew * MICRO_TO_MILLISECOND_CONVERSION <
                machine_time_step() / MICRO_TO_MILLISECOND_CONVERSION):
            # fast rewiring, so need to set max_rewires_per_ts
            max_rewires_per_ts = int(machine_time_step() /
                                     (self.p_rew * MICRO_TO_SECOND_CONVERSION))

        return max_rewires_per_ts

    def __write_common_rewiring_data(
            self, spec, app_vertex, vertex_slice, n_pre_pops):
        """ Write the non-sub-population synapse parameters to the spec.

        :param ~data_specification.DataSpecificationGenerator spec:
            the data spec
        :param ~pacman.model.graphs.application.ApplicationVertex app_vertex:
            The application vertex being generated
        :param ~pacman.model.graphs.common.Slice vertex_slice:
            The slice of the target vertex to generate for
        :param int n_pre_pops: the number of pre-populations
        :return: None
        :rtype: None
        """
        spec.comment("Writing common rewiring data")
        if (self.p_rew * MICRO_TO_MILLISECOND_CONVERSION <
                machine_time_step() / MICRO_TO_MILLISECOND_CONVERSION):
            # Fast rewiring
            spec.write_value(data=1)
            spec.write_value(data=int(
                machine_time_step() / (
                    self.p_rew * MICRO_TO_SECOND_CONVERSION)))
        else:
            # Slow rewiring
            spec.write_value(data=0)
            spec.write_value(data=int((
                self.p_rew * MICRO_TO_SECOND_CONVERSION) /
                machine_time_step()))
        # write s_max
        spec.write_value(data=int(self.s_max))
        # write total number of atoms in the application vertex
        spec.write_value(data=app_vertex.n_atoms)
        # write local low, high and number of atoms
        spec.write_value(data=vertex_slice.n_atoms)
        spec.write_value(data=vertex_slice.lo_atom)
        spec.write_value(data=vertex_slice.hi_atom)
        # write with_replacement
        spec.write_value(data=self.with_replacement)

        # write app level seeds
        spec.write_array(self.get_seeds(app_vertex))

        # write local seed (4 words), generated randomly!
        # Note that in case of a reset, these need a key to ensure subsequent
        # runs match the first run
        spec.write_array(self.get_seeds(vertex_slice))

        # write the number of pre-populations
        spec.write_value(data=n_pre_pops)
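The fast/slow branch above reduces to comparing the rewiring period with the machine time step. A minimal standalone sketch of that arithmetic (illustrative names and values, not the library code):

    # Rewiring period p_rew is in seconds; the machine time step is in
    # microseconds. "Fast" means more than one rewire per time step.
    MICRO_TO_SECOND = 1_000_000

    def rewiring_schedule(p_rew_s, time_step_us):
        p_rew_us = p_rew_s * MICRO_TO_SECOND
        if p_rew_us < time_step_us:
            # fast rewiring: number of rewires to perform each time step
            return "fast", int(time_step_us / p_rew_us)
        # slow rewiring: number of time steps between rewires
        return "slow", int(p_rew_us / time_step_us)

    print(rewiring_schedule(1e-4, 1000))   # ('fast', 10): 0.1 ms period, 1 ms step
    print(rewiring_schedule(1e-2, 1000))   # ('slow', 10): 10 ms period, 1 ms step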
Example #3
    def gen_data(self):
        """ Get the data to be written for this connection

        :rtype: ~numpy.ndarray(~numpy.uint32)
        """
        connector = self.__synapse_information.connector
        items = list()
        items.append(numpy.array([
            self.__max_row_n_synapses,
            self.__max_delayed_row_n_synapses,
            self.__post_vertex_slice.lo_atom,
            self.__post_vertex_slice.n_atoms,
            self.__max_stage,
            self.__delay_per_stage,
            DataType.S1615.encode_as_int(
                MICRO_TO_MILLISECOND_CONVERSION /
                machine_time_step()),
            connector.gen_connector_id,
            connector.gen_delays_id(self.__synapse_information.delays)],
            dtype="uint32"))
        items.append(connector.gen_connector_params(
            self.__pre_slices, self.__post_slices, self.__pre_vertex_slice,
            self.__post_vertex_slice, self.__synapse_information.synapse_type,
            self.__synapse_information))
        items.append(connector.gen_delay_params(
            self.__synapse_information.delays, self.__pre_vertex_slice,
            self.__post_vertex_slice))
        return numpy.concatenate(items)
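The header above stores the number of milliseconds per time step as an S1615 fixed-point word (a signed 32-bit value with 15 fractional bits). A hand-rolled illustration of that encoding, assuming a 1 ms machine time step (this is just the arithmetic, not the DataType API):

    MICRO_TO_MILLISECOND = 1000
    time_step_us = 1000                        # assumed 1 ms machine time step
    ms_per_step = MICRO_TO_MILLISECOND / time_step_us
    encoded = int(round(ms_per_step * (1 << 15)))
    print(encoded)                             # 32768 == 1.0 in S1615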
Example #4
    def get_data(self, parameters, state_variables, vertex_slice):
        # Work out the time step for each internal sub-step
        ts = machine_time_step()
        ts /= self.__n_steps_per_timestep
        items = [numpy.array([self.__n_steps_per_timestep], dtype="uint32")]
        items.extend(
            component.get_data(parameters, state_variables, vertex_slice, ts)
            for component in self.__components)
        return numpy.concatenate(items)

    def max_spikes_per_ts(self):
        ts_per_second = (MICRO_TO_SECOND_CONVERSION / machine_time_step())
        if float(self.__max_rate) / ts_per_second < \
                SLOW_RATE_PER_TICK_CUTOFF:
            return 1

        # Experiments show that at chance_ts = 1000 this bound is typically
        # higher than the actual maximum observed
        chance_ts = 1000
        max_spikes_per_ts = scipy.stats.poisson.ppf(
            1.0 - (1.0 / float(chance_ts)),
            float(self.__max_rate) / ts_per_second)
        return int(math.ceil(max_spikes_per_ts)) + 1
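To make the bound concrete, here is a worked check of the same calculation for an assumed 1000 Hz maximum rate at a 1 ms time step (hypothetical values, standalone rather than part of the class):

    import math
    import scipy.stats

    rate_hz = 1000.0                      # assumed maximum rate
    time_step_us = 1000.0                 # assumed 1 ms machine time step
    ts_per_second = 1_000_000 / time_step_us
    lam = rate_hz / ts_per_second         # mean spikes per time step
    bound = scipy.stats.poisson.ppf(1.0 - 1.0 / 1000.0, lam)
    print(int(math.ceil(bound)) + 1)      # 6: buffer for the 99.9th percentile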
Example #6
    def __write_prepopulation_info(self, spec, app_vertex,
                                   structural_projections, routing_info,
                                   weight_scales, synaptic_matrices,
                                   post_vertex_slice):
        """
        :param ~data_specification.DataSpecificationGenerator spec:
        :param ~pacman.model.graphs.application.ApplicationVertex app_vertex:
            the vertex for which data specs are being prepared
        :param list(Projection) structural_projections:
            Projections that are structural
        :param RoutingInfo routing_info:
        :param dict(AbstractSynapseType,float) weight_scales:
        :param SynapticMatrices synaptic_matrices:
        :param ~pacman.model.graphs.common.Slice post_vertex_slice:
            The slice of the post-synaptic vertex being written for
        :rtype: dict(tuple(AbstractPopulationVertex,SynapseInformation),int)
        """
        spec.comment("Writing pre-population info")
        pop_index = dict()
        index = 0
        for proj in structural_projections:
            spec.comment("Writing pre-population info for {}".format(
                proj.label))
            app_edge = proj._projection_edge
            synapse_info = proj._synapse_information
            pop_index[app_edge.pre_vertex, synapse_info] = index
            index += 1
            dynamics = synapse_info.synapse_dynamics

            machine_edges = list()
            for machine_edge in app_edge.machine_edges:
                if machine_edge.post_vertex.vertex_slice == post_vertex_slice:
                    machine_edges.append(machine_edge)

            # Number of machine edges
            spec.write_value(len(machine_edges), data_type=DataType.UINT16)
            # Controls - currently just if this is a self connection or not
            self_connected = app_vertex == app_edge.pre_vertex
            spec.write_value(int(self_connected), data_type=DataType.UINT16)
            # Delay
            delay_scale = (MICRO_TO_MILLISECOND_CONVERSION /
                           machine_time_step())
            if isinstance(dynamics.initial_delay, collections.abc.Iterable):
                spec.write_value(int(dynamics.initial_delay[0] * delay_scale),
                                 data_type=DataType.UINT16)
                spec.write_value(int(dynamics.initial_delay[1] * delay_scale),
                                 data_type=DataType.UINT16)
            else:
                scaled_delay = dynamics.initial_delay * delay_scale
                spec.write_value(scaled_delay, data_type=DataType.UINT16)
                spec.write_value(scaled_delay, data_type=DataType.UINT16)

            # Weight
            spec.write_value(
                round(dynamics.initial_weight *
                      weight_scales[synapse_info.synapse_type]))
            # Connection type
            spec.write_value(synapse_info.synapse_type)
            # Total number of atoms in pre-vertex
            spec.write_value(app_edge.pre_vertex.n_atoms)
            # Machine edge information
            for machine_edge in machine_edges:
                r_info = routing_info.get_routing_info_for_edge(machine_edge)
                vertex_slice = machine_edge.pre_vertex.vertex_slice
                spec.write_value(r_info.first_key)
                spec.write_value(r_info.first_mask)
                spec.write_value(vertex_slice.n_atoms)
                spec.write_value(vertex_slice.lo_atom)
                spec.write_value(
                    synaptic_matrices.get_index(app_edge, synapse_info,
                                                machine_edge))
        return pop_index
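The delay written above is converted from milliseconds into a whole number of time steps before being stored as a 16-bit word. A tiny illustration of that scaling with assumed numbers:

    MICRO_TO_MILLISECOND = 1000
    time_step_us = 1000                          # assumed 1 ms machine time step
    delay_scale = MICRO_TO_MILLISECOND / time_step_us
    initial_delay_ms = 1.5
    print(int(initial_delay_ms * delay_scale))   # 1: truncated to whole steps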
Example #7
    def conn_list(self, conn_list):
        if conn_list is None or not len(conn_list):
            self.__conn_list = numpy.zeros((0, 2), dtype="uint32")
        else:
            self.__conn_list = numpy.array(conn_list)

        # If the shape of the conn_list is 2D, numpy has been able to create
        # a 2D array which means every entry has the same number of values.
        # If this was not possible, raise an exception!
        if len(self.__conn_list.shape) != 2:
            raise InvalidParameterType(
                "Each tuple in the connection list for the"
                " FromListConnector must have the same number of elements")

        # This tells us how many columns are in the list
        n_columns = self.__conn_list.shape[1]
        if n_columns < 2:
            raise InvalidParameterType(
                "Each tuple in the connection list for the"
                " FromListConnector must have at least 2 elements")
        if (self.__column_names is not None
                and n_columns != len(self.__column_names) + _FIRST_PARAM):
            raise InvalidParameterType(
                "The number of column names must match the number of"
                " additional elements in each tuple in the connection list,"
                " not including the pre_idx or post_idx")

        # Get the column names if not specified
        column_names = self.__column_names
        if self.__column_names is None:
            if n_columns == 4:
                column_names = ('weight', 'delay')
            elif n_columns == 2:
                column_names = ()
            else:
                raise TypeError(
                    "Need to set 'column_names' for n_columns={}".format(
                        n_columns))

        # Set the source and targets
        self.__sources = self.__conn_list[:, _SOURCE]
        self.__targets = self.__conn_list[:, _TARGET]

        # Find any weights
        self.__weights = None
        try:
            weight_column = column_names.index('weight') + _FIRST_PARAM
            self.__weights = self.__conn_list[:, weight_column]
        except ValueError:
            pass

        # Find any delays
        self.__delays = None
        try:
            delay_column = column_names.index('delay') + _FIRST_PARAM
            self.__delays = (
                numpy.rint(
                    numpy.array(self.__conn_list[:, delay_column]) *
                    (MICRO_TO_MILLISECOND_CONVERSION / machine_time_step())) *
                (machine_time_step() / MICRO_TO_MILLISECOND_CONVERSION))
        except ValueError:
            pass

        # Find extra columns
        extra_columns = list()
        for i, name in enumerate(column_names):
            if name not in ('weight', 'delay'):
                extra_columns.append(i + _FIRST_PARAM)

        # Check that any additional parameters have a single value over the
        # whole set of connections (as other things aren't currently supported)
        for i in extra_columns:
            # numpy.ptp gives the difference between the maximum and
            # minimum values of an array, so if 0, all values are equal
            if numpy.ptp(self.__conn_list[:, i]):
                raise ValueError(
                    "All values in column {} ({}) of a FromListConnector must"
                    " have the same value".format(
                        i, column_names[i - _FIRST_PARAM]))

        # Store the extra data
        self.__extra_parameters = None
        self.__extra_parameter_names = None
        if extra_columns:
            self.__extra_parameters = self.__conn_list[:, extra_columns]
            self.__extra_parameter_names = [
                column_names[i - _FIRST_PARAM] for i in extra_columns
            ]
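For reference, the shapes the setter above accepts look like this (hypothetical values): two columns mean just (pre_idx, post_idx), four columns default to (pre_idx, post_idx, weight, delay), and any extra named column must carry a single value for every connection, which is what the numpy.ptp (max minus min) test enforces:

    import numpy

    pairs_only = [(0, 1), (2, 3)]            # pre, post
    with_weight_delay = [(0, 1, 0.5, 1.0),   # pre, post, weight, delay (ms)
                         (2, 3, 0.25, 2.0)]

    # An extra parameter column is only accepted if every row holds the
    # same value, i.e. the max-minus-min spread is zero
    extra_column = numpy.array([7.0, 7.0, 7.0])
    assert numpy.ptp(extra_column) == 0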
Example #8
    def get_spikes_sampling_interval(self):
        return machine_time_step()

    def read_parameters_from_machine(self, transceiver, placement,
                                     vertex_slice):

        # locate SDRAM address where parameters are stored
        poisson_params = self.poisson_param_region_address(
            placement, transceiver)
        seed_array = _FOUR_WORDS.unpack_from(
            transceiver.read_memory(placement.x, placement.y,
                                    poisson_params + self.SEED_OFFSET_BYTES,
                                    self.SEED_SIZE_BYTES))
        self._app_vertex.update_kiss_seed(vertex_slice, seed_array)

        # locate SDRAM address where the rates are stored
        poisson_rate_region_sdram_address = (self.poisson_rate_region_address(
            placement, transceiver))

        # get the size of the rates data
        size_of_region = get_rates_bytes(vertex_slice, self._app_vertex.rates)

        # get data from the machine
        byte_array = transceiver.read_memory(
            placement.x, placement.y, poisson_rate_region_sdram_address,
            size_of_region)

        # For each atom, read the number of rates and the rate parameters
        offset = 0
        for i in range(vertex_slice.lo_atom, vertex_slice.hi_atom + 1):
            n_values, = _ONE_WORD.unpack_from(byte_array, offset)
            offset += 4

            # Skip reading the index, as it will be recalculated on data write
            offset += 4

            (_start, _end, _next, is_fast_source, exp_minus_lambda,
             sqrt_lambda, isi,
             time_to_next_spike) = (self._PoissonStruct.read_data(
                 byte_array, offset, n_values))
            offset += (self._PoissonStruct.get_size_in_whole_words(n_values) *
                       BYTES_PER_WORD)

            # Work out the spikes per tick depending on if the source is
            # slow (isi), fast (exp) or faster (sqrt)
            is_fast_source = is_fast_source == 1.0
            spikes_per_tick = numpy.zeros(len(is_fast_source), dtype="float")
            spikes_per_tick[is_fast_source] = numpy.log(
                exp_minus_lambda[is_fast_source]) * -1.0
            is_faster_source = sqrt_lambda > 0
            # pylint: disable=assignment-from-no-return
            spikes_per_tick[is_faster_source] = numpy.square(
                sqrt_lambda[is_faster_source])
            slow_elements = isi > 0
            spikes_per_tick[slow_elements] = 1.0 / isi[slow_elements]

            # Convert spikes per tick to rates
            self._app_vertex.rates.set_value_by_id(
                i,
                spikes_per_tick *
                (MICRO_TO_SECOND_CONVERSION / machine_time_step()))

            # Store the updated time until next spike so that it can be
            # rewritten when the parameters are loaded
            self._app_vertex.time_to_spike.set_value_by_id(
                i, time_to_next_spike)

    @staticmethod
    def _convert_ms_to_n_timesteps(value):
        return numpy.round(value * (MICRO_TO_MILLISECOND_CONVERSION /
                                    machine_time_step())).astype("uint32")

    def _write_poisson_parameters(self, spec, graph, placement, routing_info):
        """ Generate Parameter data for Poisson spike sources

        :param ~data_specification.DataSpecification spec:
            the data specification writer
        :param ~pacman.model.graphs.machine.MachineGraph graph:
        :param ~pacman.model.placements.Placement placement:
        :param ~pacman.model.routing_info.RoutingInfo routing_info:
        """
        # pylint: disable=too-many-arguments, too-many-locals
        spec.comment("\nWriting Parameters for {} poisson sources:\n".format(
            self.vertex_slice.n_atoms))

        # Set the write focus to the Poisson parameters memory region:
        spec.switch_write_focus(
            self.POISSON_SPIKE_SOURCE_REGIONS.POISSON_PARAMS_REGION.value)

        # Write Key info for this core:
        key = routing_info.get_first_key_from_pre_vertex(
            placement.vertex, constants.SPIKE_PARTITION_ID)
        spec.write_value(data=1 if key is not None else 0)
        spec.write_value(data=key if key is not None else 0)

        # Write the incoming mask if there is one
        in_edges = graph.get_edges_ending_at_vertex_with_partition_name(
            placement.vertex, constants.LIVE_POISSON_CONTROL_PARTITION_ID)
        if len(in_edges) > 1:
            raise ConfigurationException(
                "Only one control edge can end at a Poisson vertex")
        incoming_mask = 0
        if len(in_edges) == 1:
            in_edge = in_edges[0]

            # Get the mask of the incoming keys
            incoming_mask = \
                routing_info.get_routing_info_for_edge(in_edge).first_mask
            incoming_mask = ~incoming_mask & 0xFFFFFFFF
        spec.write_value(incoming_mask)

        # Write the number of seconds per timestep (unsigned long fract)
        spec.write_value(data=machine_time_step() / MICRO_TO_SECOND_CONVERSION,
                         data_type=DataType.U032)

        # Write the number of timesteps per second (integer)
        spec.write_value(data=int(MICRO_TO_SECOND_CONVERSION /
                                  machine_time_step()))

        # Write the slow-rate-per-tick-cutoff (accum)
        spec.write_value(data=self.SLOW_RATE_PER_TICK_CUTOFF,
                         data_type=DataType.S1615)

        # Write the fast-rate-per-tick-cutoff (accum)
        spec.write_value(data=self.FAST_RATE_PER_TICK_CUTOFF,
                         data_type=DataType.S1615)

        # Write the lo_atom ID
        spec.write_value(data=self.vertex_slice.lo_atom)

        # Write the number of sources
        spec.write_value(data=self.vertex_slice.n_atoms)

        # Write the maximum spikes per tick
        spec.write_value(data=self.max_spikes_per_ts())

        # Write the random seed (4 words), generated randomly!
        for value in self._app_vertex.kiss_seed(self.vertex_slice):
            spec.write_value(data=value)
    def _write_poisson_rates(self, spec, first_machine_time_step):
        """ Generate Rate data for Poisson spike sources

        :param ~data_specification.DataSpecification spec:
            the data specification writer
        :param int first_machine_time_step:
            First machine time step to start from the correct index
        """
        spec.comment("\nWriting Rates for {} poisson sources:\n".format(
            self.vertex_slice.n_atoms))

        # Set the write focus to the rates memory region:
        spec.switch_write_focus(
            self.POISSON_SPIKE_SOURCE_REGIONS.RATES_REGION.value)

        # Extract the data on which to work and convert to appropriate form
        starts = numpy.array(
            list(_flatten(self._app_vertex.start[
                self.vertex_slice.as_slice]))).astype("float")
        durations = numpy.array(
            list(
                _flatten(self._app_vertex.duration[
                    self.vertex_slice.as_slice]))).astype("float")
        local_rates = self._app_vertex.rates[self.vertex_slice.as_slice]
        n_rates = numpy.array([len(r) for r in local_rates])
        splits = numpy.cumsum(n_rates)
        rates = numpy.array(list(_flatten(local_rates)))
        time_to_spike = numpy.array(
            list(
                _flatten(self._app_vertex.time_to_spike[
                    self.vertex_slice.as_slice]))).astype("u4")
        rate_change = self._app_vertex.rate_change[self.vertex_slice.as_slice]

        # Convert start times to start time steps
        starts_scaled = self._convert_ms_to_n_timesteps(starts)

        # Convert durations to end time steps, using the maximum for "None"
        # duration (which means "until the end")
        no_duration = numpy.isnan(durations)
        durations_filtered = numpy.where(no_duration, 0, durations)
        ends_scaled = self._convert_ms_to_n_timesteps(
            durations_filtered) + starts_scaled
        ends_scaled = (numpy.where(no_duration, self._MAX_TIMESTEP,
                                   ends_scaled))

        # Work out the timestep at which the next rate activates, using
        # the maximum value at the end (meaning there is no "next")
        starts_split = numpy.array_split(starts_scaled, splits)
        next_scaled = numpy.concatenate([
            numpy.append(s[1:], self._MAX_TIMESTEP) for s in starts_split[:-1]
        ])

        # Compute the spikes per tick for each rate for each atom
        spikes_per_tick = rates * (machine_time_step() /
                                   MICRO_TO_SECOND_CONVERSION)
        # Determine the properties of the sources
        is_fast_source = spikes_per_tick >= self.SLOW_RATE_PER_TICK_CUTOFF
        is_faster_source = spikes_per_tick >= self.FAST_RATE_PER_TICK_CUTOFF
        not_zero = spikes_per_tick > 0
        # pylint: disable=assignment-from-no-return
        is_slow_source = numpy.logical_not(is_fast_source)

        # Compute the e^-(spikes_per_tick) for fast sources to allow fast
        # computation of the Poisson distribution to get the number of
        # spikes per timestep
        exp_minus_lambda = DataType.U032.encode_as_numpy_int_array(
            numpy.where(is_fast_source, numpy.exp(-1.0 * spikes_per_tick), 0))

        # Compute sqrt(lambda) for "faster" sources to allow Gaussian
        # approximation of the Poisson distribution to get the number of
        # spikes per timestep
        sqrt_lambda = DataType.S1615.encode_as_numpy_int_array(
            numpy.where(is_faster_source, numpy.sqrt(spikes_per_tick), 0))

        # Compute the inter-spike-interval for slow sources to get the
        # average number of timesteps between spikes
        isi_val = numpy.where(not_zero & is_slow_source,
                              (1.0 / spikes_per_tick).astype(int),
                              0).astype("uint32")

        # Reuse the time-to-spike read from the machine (if it has been run),
        # or don't if the rate has since been changed
        time_to_spike_split = numpy.array_split(time_to_spike, splits)
        time_to_spike = numpy.concatenate([
            t if rate_change[i] else numpy.repeat(0, len(t))
            for i, t in enumerate(time_to_spike_split[:-1])
        ])

        # Turn the fast source booleans into uint32
        is_fast_source = is_fast_source.astype("uint32")

        # Group together the rate data for the core by rate
        core_data = numpy.dstack(
            (starts_scaled, ends_scaled, next_scaled, is_fast_source,
             exp_minus_lambda, sqrt_lambda, isi_val, time_to_spike))[0]

        # Group data by neuron id
        core_data_split = numpy.array_split(core_data, splits)

        # Work out the index where the core should start based on the given
        # first timestep
        ends_scaled_split = numpy.array_split(ends_scaled, splits)
        indices = [
            numpy.argmax(e > first_machine_time_step)
            for e in ends_scaled_split[:-1]
        ]

        # Build the final data for this core, and write it
        final_data = numpy.concatenate([
            numpy.concatenate(([len(d), indices[i]], numpy.concatenate(d)))
            for i, d in enumerate(core_data_split[:-1])
        ])
        spec.write_array(final_data)
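The rate preparation above splits sources into three regimes: slow sources store an inter-spike interval, fast sources store exp(-lambda) for direct Poisson sampling, and "faster" sources store sqrt(lambda) for a Gaussian approximation. A standalone numpy sketch of that split, assuming illustrative cutoff values rather than the real class constants:

    import numpy

    SLOW_CUTOFF = 0.01       # assumed spikes-per-tick cutoffs, not the
    FAST_CUTOFF = 10.0       # actual SLOW/FAST_RATE_PER_TICK_CUTOFF values

    spikes_per_tick = numpy.array([0.001, 0.5, 50.0])
    is_fast = spikes_per_tick >= SLOW_CUTOFF
    is_faster = spikes_per_tick >= FAST_CUTOFF
    is_slow = ~is_fast

    exp_minus_lambda = numpy.where(is_fast, numpy.exp(-spikes_per_tick), 0)
    sqrt_lambda = numpy.where(is_faster, numpy.sqrt(spikes_per_tick), 0)
    isi = numpy.where(is_slow & (spikes_per_tick > 0),
                      1.0 / spikes_per_tick, 0).astype("uint32")
    print(exp_minus_lambda, sqrt_lambda, isi)   # one entry per source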