Example #1
class Conjgrad(LeastSquaresSolver):
    """Solve a least-squares system using conjugate gradient."""

    tol = NumberParam('tol', low=0)
    maxiters = IntParam('maxiters', low=1, optional=True)
    X0 = NdarrayParam('X0', shape=('*', '*'), optional=True)

    def __init__(self, tol=1e-2, maxiters=None, X0=None):
        super(Conjgrad, self).__init__()
        self.tol = tol
        self.maxiters = maxiters
        self.X0 = X0

    def __call__(self, A, Y, sigma, rng=None):
        Y, m, n, d, matrix_in = format_system(A, Y)
        X = np.zeros((n, d)) if self.X0 is None else np.array(self.X0)
        if X.shape != (n, d):
            raise ValidationError("Must be shape %s, got %s"
                                  % ((n, d), X.shape), attr='X0', obj=self)

        damp = m * sigma**2
        rtol = self.tol * np.sqrt(m)
        G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
        B = np.dot(A.T, Y)

        iters = -np.ones(d, dtype='int')
        for i in range(d):
            X[:, i], iters[i] = self._conjgrad_iters(
                G, B[:, i], X[:, i], maxiters=self.maxiters, rtol=rtol)

        info = {'rmses': rmses(A, X, Y), 'iterations': iters}
        return X if matrix_in else X.ravel(), info

    @staticmethod
    def _conjgrad_iters(calcAx, b, x, maxiters=None, rtol=1e-6):
        """Solve a single-RHS linear system using conjugate gradient."""

        if maxiters is None:
            maxiters = b.shape[0]

        r = b - calcAx(x)
        p = r.copy()
        rsold = np.dot(r, r)

        for i in range(maxiters):
            Ap = calcAx(p)
            alpha = rsold / np.dot(p, Ap)
            x += alpha * p
            r -= alpha * Ap

            rsnew = np.dot(r, r)
            beta = rsnew / rsold

            if np.sqrt(rsnew) < rtol:
                break

            if beta < 1e-12:  # no perceptible change in p
                break

            # p = r + beta*p
            p *= beta
            p += r
            rsold = rsnew

        return x, i+1
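A usage sketch, not from the original source: solving a small random least-squares system by calling the solver directly. It assumes numpy is imported as np and that the helpers the class relies on (format_system, rmses) are in scope, as in nengo.utils.least_squares_solvers.

rng = np.random.RandomState(0)
A = rng.randn(100, 20)                  # m x n activity matrix
Y = rng.randn(100, 3)                   # m x d targets
X, info = Conjgrad(tol=1e-4)(A, Y, sigma=0.01, rng=rng)
print(info['iterations'], info['rmses'])  # per-column CG iterations and errors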
Example #2
class Ensemble(NengoObject):
    """A group of neurons that collectively represent a vector.

    Parameters
    ----------
    n_neurons : int
        The number of neurons.
    dimensions : int
        The number of representational dimensions.
    radius : float, optional
        The representational radius of the ensemble.
    encoders : Distribution or ndarray (`n_neurons`, `dimensions`), optional
        The encoders, used to transform from representational space
        to neuron space. Each row is a neuron's encoder, each column is a
        representational dimension.
    intercepts : Distribution or ndarray (`n_neurons`), optional
        The point along each neuron's encoder where its activity is zero. If
        e is the neuron's encoder, then the activity will be zero when
        dot(x, e) <= c, where c is the given intercept.
    max_rates : Distribution or ndarray (`n_neurons`), optional
        The activity of each neuron when dot(x, e) = 1, where e is the neuron's
        encoder.
    eval_points : Distribution or ndarray (`n_eval_points`, `dims`), optional
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which to
        choose evaluation points. Default: ``UniformHypersphere``.
    n_eval_points : int, optional
        The number of evaluation points to be drawn from the `eval_points`
        distribution. If None (the default), then a heuristic is used to
        determine the number of evaluation points.
    neuron_type : Neurons, optional
        The model that simulates all neurons in the ensemble.
    noise : Process, optional
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    seed : int, optional
        The seed used for random number generation.
    label : str, optional
        A name for the ensemble. Used for debugging and visualization.
    """

    n_neurons = IntParam(default=None, low=1)
    dimensions = IntParam(default=None, low=1)
    radius = NumberParam(default=1.0, low=1e-10)
    neuron_type = NeuronTypeParam(default=LIF())
    encoders = DistOrArrayParam(default=UniformHypersphere(surface=True),
                                sample_shape=('n_neurons', 'dimensions'))
    intercepts = DistOrArrayParam(default=Uniform(-1.0, 1.0),
                                  optional=True,
                                  sample_shape=('n_neurons', ))
    max_rates = DistOrArrayParam(default=Uniform(200, 400),
                                 optional=True,
                                 sample_shape=('n_neurons', ))
    n_eval_points = IntParam(default=None, optional=True)
    eval_points = DistOrArrayParam(default=UniformHypersphere(),
                                   sample_shape=('*', 'dimensions'))
    bias = DistOrArrayParam(default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    gain = DistOrArrayParam(default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    noise = ProcessParam(default=None, optional=True)
    seed = IntParam(default=None, optional=True)
    label = StringParam(default=None, optional=True)

    def __init__(self,
                 n_neurons,
                 dimensions,
                 radius=Default,
                 encoders=Default,
                 intercepts=Default,
                 max_rates=Default,
                 eval_points=Default,
                 n_eval_points=Default,
                 neuron_type=Default,
                 gain=Default,
                 bias=Default,
                 noise=Default,
                 seed=Default,
                 label=Default):

        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.label = label
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.neuron_type = neuron_type
        self.noise = noise
        self.seed = seed
        self._neurons = Neurons(self)

    def __getitem__(self, key):
        return ObjView(self, key)

    def __len__(self):
        return self.dimensions

    @property
    def neurons(self):
        return self._neurons

    @neurons.setter
    def neurons(self, dummy):
        raise AttributeError("neurons cannot be overwritten.")

    @property
    def probeable(self):
        return ["decoded_output", "input"]

    @property
    def size_in(self):
        return self.dimensions

    @property
    def size_out(self):
        return self.dimensions
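A brief usage sketch, assuming the standard nengo package: ensembles are created inside a Network context, and indexing one returns an ObjView over the selected dimensions (see __getitem__ above).

import nengo

with nengo.Network() as model:
    ens = nengo.Ensemble(n_neurons=100, dimensions=2, radius=1.5,
                         label='my ensemble')
    nengo.Connection(ens[0], ens[1])  # connect one dimension to another
print(len(ens), ens.size_in, ens.size_out)  # 2 2 2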
Example #3
class SpatiallyConstrainedConnectivity(ConstrainedConnectivity):
    """
    Same as "ConstrainedConnectivity", but with a default callback for the
    "probabilities" callback that computes connection probabilities based on the
    location of the neurons.
    """

    sigma = NumberParam(
        name="sigma",
        low=0.0,
        default=0.25,
        low_open=True,
        readonly=True,
    )

    projection = NdarrayParam(
        name="projection",
        default=np.zeros((0, )),
        optional=True,
        shape=('*', '*'),
        readonly=True,
    )

    @property
    def _argreprs(self):
        return super()._argreprs + [
            "sigma={}".format(self.sigma),
            "projection={}".format(self.projection),
        ]

    def get_probabilities(self, n_pre, n_post, pre_obj, post_obj, data):
        # Fetch the neuron locations
        xs_pre, xs_post = None, None

        # If the "locations" attribute is set
        if (pre_obj in data) and hasattr(data[pre_obj], 'locations'):
            xs_pre = data[pre_obj].locations
        if (post_obj in data) and hasattr(data[post_obj], 'locations'):
            xs_post = data[post_obj].locations

        # We cannot compute connectivity constraints if the locations are not
        # defined -- just use uniform connection probabilities (by returning
        # "None")
        if (xs_pre is None) or (xs_post is None):
            return None

        # Make sure the number of pre-neurons and the number of post-neurons
        # are correct
        if xs_pre.ndim != 2:
            raise ValueError(
                "Pre-population neuron locations must be a 2D array, "
                "but got {}D array".format(xs_pre.ndim))
        if xs_post.ndim != 2:
            raise ValueError(
                "Post-population neuron locations must be a 2D array, "
                "but got {}D array".format(xs_post.ndim))
        if n_pre != xs_pre.shape[0]:
            raise ValueError(
                "Expected pre-population neuron location shape ({}, d_pre), "
                "but got ({}, d_pre)".format(n_pre, xs_pre.shape[0]))
        if n_post != xs_post.shape[0]:
            raise ValueError(
                "Expected post-population neuron location shape ({}, d_post), "
                "but got ({}, d_post)".format(n_post, xs_post.shape[0]))

        # Fetch the dimensionality of the neuron locations
        d_pre, d_post = xs_pre.shape[1], xs_post.shape[1]

        # Project the locations onto the minimum dimensionality
        d_min, d_max = min(d_pre, d_post), max(d_pre, d_post)
        P = np.eye(d_min,
                   d_max) if self.projection is None else self.projection

        # Make sure the projection matrix has the correct size
        if (P.shape[0] != d_min and (d_min != d_max)) or (P.shape[1] != d_max):
            raise ValueError("Expected a projection matrix of size ({}, {}), "
                             "but got projection matrix of shape {}".format(
                                 d_min, d_max, P.shape))

        # Apply the projection
        if xs_pre.shape[1] == d_max:
            xs_pre = xs_pre @ P.T
        if xs_post.shape[1] == d_max:
            xs_post = xs_post @ P.T

        # Compute the squared distance
        dists = np.sum(np.square(xs_pre[:, None] - xs_post[None, :]), axis=-1)

        # Apply exponential falloff
        return np.exp(-dists / np.square(self.sigma))

    def __init__(self,
                 convergence=None,
                 divergence=None,
                 probabilities=None,
                 sigma=0.25,
                 projection=None):

        # Call the inherited constructor
        super().__init__(convergence, divergence, probabilities)

        # Copy the sigma parameter
        self.sigma = sigma

        # Copy the projection parameter
        self.projection = projection

        # Install the probabilities callback, defaulting to get_probabilities
        if probabilities is None:

            def get_probabilities_wrapper(*args, **kwargs):
                return self.get_probabilities(*args, **kwargs)

            self.probabilities = get_probabilities_wrapper
        else:
            self.probabilities = probabilities
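A hypothetical sketch of calling get_probabilities directly. Per the duck-typed checks above, any hashable objects work as keys as long as data maps them to something with a .locations array; every name below is illustrative only.

from types import SimpleNamespace

import numpy as np

conn = SpatiallyConstrainedConnectivity(sigma=0.5)
pre, post = object(), object()
data = {
    pre: SimpleNamespace(locations=np.random.rand(30, 2)),
    post: SimpleNamespace(locations=np.random.rand(40, 2)),
}
P = conn.get_probabilities(30, 40, pre, post, data)
assert P.shape == (30, 40)  # probabilities decay with squared distance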
Example #4
class WhiteSignal(Process):
    """An ideal low-pass filtered white noise process.

    This signal is created in the frequency domain, and designed to have
    exactly equal power at all frequencies below the cut-off frequency,
    and no power above the cut-off.

    The signal is naturally periodic, so it can be used beyond its period
    while still being continuous with continuous derivatives.

    Parameters
    ----------
    period : float
        A white noise signal with this period will be generated.
        Samples will repeat after this duration.
    high : float
        The cut-off frequency of the low-pass filter, in Hz.
        Must not exceed the Nyquist frequency for the simulation
        timestep, which is ``0.5 / dt``.
    rms : float, optional (Default: 0.5)
        The root mean square power of the filtered signal.
    y0 : float, optional (Default: None)
        Align the phase of each output dimension to begin at the value
        that is closest (in absolute value) to y0.
    seed : int, optional (Default: None)
        Random number seed. Ensures noise will be the same each run.
    """

    period = NumberParam('period', low=0, low_open=True)
    high = NumberParam('high', low=0, low_open=True)
    rms = NumberParam('rms', low=0, low_open=True)
    y0 = NumberParam('y0', optional=True)

    def __init__(self, period, high, rms=0.5, y0=None, **kwargs):
        super().__init__(default_size_in=0, **kwargs)
        self.period = period
        self.high = high
        self.rms = rms
        self.y0 = y0

        if self.high is not None and self.high < 1. / self.period:
            raise ValidationError(
                "Make ``high >= 1. / period`` to produce a non-zero signal",
                attr='high',
                obj=self)

    def __repr__(self):
        return "%s(period=%r, high=%r, rms=%r)" % (
            type(self).__name__, self.period, self.high, self.rms)

    def make_step(self, shape_in, shape_out, dt, rng):
        assert shape_in == (0, )

        nyquist_cutoff = 0.5 / dt
        if self.high > nyquist_cutoff:
            raise ValidationError("High must not exceed the Nyquist frequency "
                                  "for the given dt (%0.3f)" % nyquist_cutoff,
                                  attr='high',
                                  obj=self)

        n_coefficients = int(np.ceil(self.period / dt / 2.))
        shape = (n_coefficients + 1, ) + shape_out
        sigma = self.rms * np.sqrt(0.5)
        coefficients = 1j * rng.normal(0., sigma, size=shape)
        coefficients += rng.normal(0., sigma, size=shape)
        coefficients[0] = 0.
        coefficients[-1].imag = 0.

        set_to_zero = npext.rfftfreq(2 * n_coefficients, d=dt) > self.high
        coefficients[set_to_zero] = 0.
        power_correction = np.sqrt(1. - np.sum(set_to_zero, dtype=float) /
                                   n_coefficients)
        if power_correction > 0.:
            coefficients /= power_correction
        coefficients *= np.sqrt(2 * n_coefficients)
        signal = np.fft.irfft(coefficients, axis=0)

        if self.y0 is not None:
            # Starts each dimension off where it is closest to y0
            def shift(x):
                offset = np.argmin(abs(self.y0 - x))
                return np.roll(x, -offset + 1)  # +1 since t starts at dt

            signal = np.apply_along_axis(shift, 0, signal)

        def step_whitesignal(t):
            i = int(round(t / dt))
            return signal[i % signal.shape[0]]

        return step_whitesignal
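A usage sketch, assuming standard nengo: a process can be run standalone via Process.run, or attached to a Node as a signal source.

import nengo

sig = WhiteSignal(period=1.0, high=10, rms=0.5, seed=1)
y = sig.run(t=1.0, d=2, dt=0.001)  # shape (1000, 2), band-limited to 10 Hz

with nengo.Network() as model:
    stim = nengo.Node(WhiteSignal(period=10.0, high=5), size_out=1)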
Example #5
class SSPState(Network):
    """Represents a single vector, with optional memory.
    This is a minimal SPA network, useful for passing data along (for example,
    visual input).
    Parameters
    ----------
    vocab : Vocabulary or int
        The vocabulary to use to interpret the vector. If an integer is given,
        the default vocabulary of that dimensionality will be used.
    subdimensions : int, optional (Default: 16)
        The dimension of the individual ensembles making up the vector.
        Must divide *dimensions* evenly. The number of sub-ensembles
        will be ``dimensions // subdimensions``.
    neurons_per_dimension : int, optional (Default: 50)
        Number of neurons per dimension. Each ensemble will have
        ``neurons_per_dimension * subdimensions`` neurons, for a total of
        ``neurons_per_dimension * dimensions`` neurons.
    feedback : float, optional (Default: 0.0)
        Gain of feedback connection. Set to 1.0 for perfect memory,
        or 0.0 for no memory. Values in between will create a decaying memory.
    represent_cc_identity : bool, optional (Default: True)
        Whether to use optimizations to better represent the circular
        convolution identity vector. If activated, the `.IdentityEnsembleArray`
        will be used internally, otherwise a normal
        `nengo.networks.EnsembleArray` split up regularly according to
        *subdimensions*.
    feedback_synapse : float, optional (Default: 0.1)
        The synapse on the feedback connection.
    **kwargs : dict
        Keyword arguments passed through to `nengo_spa.Network`.

    Attributes
    ----------
    input : nengo.Node
        Input.
    output : nengo.Node
        Output.
    """

    vocab = VocabularyOrDimParam("vocab", default=None, readonly=True)
    subdimensions = IntParam("subdimensions", default=16, low=1, readonly=True)
    neurons_per_dimension = IntParam("neurons_per_dimension",
                                     default=50,
                                     low=1,
                                     readonly=True)
    feedback = NumberParam("feedback", default=0.0, readonly=True)
    feedback_synapse = NumberParam("feedback_synapse",
                                   default=0.1,
                                   readonly=True)
    represent_cc_identity = BoolParam("represent_cc_identity",
                                      default=True,
                                      readonly=True)

    def __init__(
            self,
            phis,
            angles,
            encoder_rng=np.random,
            vocab=Default,
            # subdimensions=Default,
            neurons_per_dimension=Default,
            feedback=Default,
            represent_cc_identity=Default,
            feedback_synapse=Default,
            limit_low=-5,
            limit_high=5,
            **kwargs):
        super(SSPState, self).__init__(**kwargs)

        self.vocab = vocab
        self.subdimensions = 6
        self.neurons_per_dimension = neurons_per_dimension
        self.feedback = feedback
        self.feedback_synapse = feedback_synapse
        self.represent_cc_identity = represent_cc_identity

        dimensions = self.vocab.dimensions

        coord_rot_mat = get_coord_rot_mat(dimensions)
        inv_coord_rot_mat = np.linalg.pinv(coord_rot_mat)

        origin = np.zeros((dimensions, ))
        origin[0] = 1
        rot_origin = origin @ coord_rot_mat.T
        origin_back = rot_origin @ inv_coord_rot_mat.T
        offset_vec = origin - origin_back

        # this offset only works for odd dimensions
        # offset_vec = np.ones((dimensions,)) * 1. / dimensions

        if ((dimensions - 1) % self.subdimensions != 0) and (
            (dimensions - 2) % self.subdimensions != 0):
            raise ValidationError(
                "Dimensions (%d), after removing the constant component(s), "
                "must be divisible by subdimensions (%d)" %
                (dimensions, self.subdimensions),
                attr="dimensions",
                obj=self,
            )

        with self:
            # if self.represent_cc_identity:
            #     self.state_ensembles = IdentityEnsembleArray(
            #         self.neurons_per_dimension,
            #         dimensions,
            #         self.subdimensions,
            #         label="ssp state",
            #     )
            # else:
            #     self.state_ensembles = EnsembleArray(
            #         self.neurons_per_dimension * self.subdimensions,
            #         dimensions // self.subdimensions,
            #         ens_dimensions=self.subdimensions,
            #         eval_points=nengo.dists.CosineSimilarity(dimensions + 2),
            #         intercepts=nengo.dists.CosineSimilarity(dimensions + 2),
            #         label="ssp state",
            #     )

            # the dimensionality with the constant(s) removed
            reduced_dim = coord_rot_mat.shape[0]
            n_toroids = len(phis)
            assert n_toroids == reduced_dim // self.subdimensions

            self.state_ensembles = EnsembleArray(
                self.neurons_per_dimension * self.subdimensions,
                n_toroids,
                ens_dimensions=self.subdimensions,
                # radius=2./dimensions,
                radius=1.,
                # eval_points=nengo.dists.CosineSimilarity(dimensions + 2),
                # intercepts=nengo.dists.CosineSimilarity(dimensions + 2),
                label="ssp state",
            )
            n_neurons = self.neurons_per_dimension * self.subdimensions
            # set the intercepts/encoders/eval points based on orientation and angle
            for k in range(n_toroids):
                preferred_locations = hilbert_2d(limit_low,
                                                 limit_high,
                                                 n_neurons,
                                                 encoder_rng,
                                                 p=8,
                                                 N=2,
                                                 normal_std=3)
                encoders_grid_cell = np.zeros((n_neurons, dimensions))
                for n in range(n_neurons):
                    encoders_grid_cell[n, :] = grid_cell_encoder(
                        location=preferred_locations[n, :],
                        dim=dimensions,
                        phi=phis[k],
                        angle=angles[k],
                        toroid_index=k)

                # rotate, shift, and slice out relevant dimensions
                sd = self.subdimensions
                encoders_transformed = (
                    encoders_grid_cell @ coord_rot_mat.T)[:, k * sd:(k + 1) * sd].copy()

                self.state_ensembles.ea_ensembles[
                    k].intercepts = nengo.dists.Uniform(0, 1)
                self.state_ensembles.ea_ensembles[
                    k].encoders = encoders_transformed * (dimensions / 2.)
                # scaling eval points by the radius, so when they are rescaled later they are correct
                self.state_ensembles.ea_ensembles[
                    k].eval_points = encoders_transformed * (dimensions / 2.)
                # self.state_ensembles.ea_ensembles[k].normalize_encoders = False

            if self.feedback is not None and self.feedback != 0.0:
                nengo.Connection(
                    self.state_ensembles.output,
                    self.state_ensembles.input,
                    transform=self.feedback,
                    synapse=self.feedback_synapse,
                )

        # Apply coordinate transform on the input and output

        self.input = nengo.Node(size_in=dimensions, label="input")
        self.output = nengo.Node(size_in=dimensions, label="output")

        # fixed offset to push the result back into the unitary space
        self.offset = nengo.Node(offset_vec)

        nengo.Connection(self.input,
                         self.state_ensembles.input,
                         transform=coord_rot_mat * (dimensions / 2.))
        nengo.Connection(self.state_ensembles.output,
                         self.output,
                         transform=inv_coord_rot_mat / (dimensions / 2.))
        nengo.Connection(self.offset, self.output)

        # self.input = self.state_ensembles.input
        # self.output = self.state_ensembles.output
        self.declare_input(self.input, self.vocab)
        self.declare_output(self.output, self.vocab)
Example #6
class LstsqL1(Solver):
    """Least-squares solver with L1 and L2 regularization (elastic net).

    This method is well suited for creating sparse decoders or weight matrices.
    """

    compositional = False

    l1 = NumberParam("l1", low=0)
    l2 = NumberParam("l2", low=0)

    def __init__(self, weights=False, l1=1e-4, l2=1e-6, max_iter=1000):
        """
        .. note:: Requires `scikit-learn <https://scikit-learn.org/stable/>`_.

        Parameters
        ----------
        weights : bool, optional
            If False, solve for decoders. If True, solve for weights.
        l1 : float, optional
            Amount of L1 regularization.
        l2 : float, optional
            Amount of L2 regularization.
        max_iter : int, optional
            Maximum number of iterations for the underlying elastic net.

        Attributes
        ----------
        l1 : float
            Amount of L1 regularization.
        l2 : float
            Amount of L2 regularization.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        max_iter : int
            Maximum number of iterations for the underlying elastic net.
        """
        import sklearn.linear_model  # noqa F401, import to check existence

        assert sklearn.linear_model
        super().__init__(weights=weights)
        self.l1 = l1
        self.l2 = l2
        self.max_iter = max_iter

    def __call__(self, A, Y, rng=np.random):
        import sklearn.linear_model

        tstart = time.time()
        Y = np.array(Y)  # copy since 'fit' may modify Y

        # TODO: play around with regularization constants (I just guessed).
        #   Do we need to scale regularization by number of neurons, to get
        #   same level of sparsity? esp. with weights? Currently, setting
        #   l1=1e-3 works well with weights when connecting 1D populations
        #   with 100 neurons each.
        a = self.l1 * A.max()  # L1 regularization
        b = self.l2 * A.max()**2  # L2 regularization
        alpha = a + b
        l1_ratio = a / (a + b)

        # --- solve least-squares A * X = Y
        model = sklearn.linear_model.ElasticNet(alpha=alpha,
                                                l1_ratio=l1_ratio,
                                                fit_intercept=False,
                                                max_iter=self.max_iter)
        model.fit(A, Y)
        X = model.coef_.T
        X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1], )
        t = time.time() - tstart
        infos = {"rmses": rmses(A, X, Y), "time": t}
        return X, infos
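A usage sketch, not from the original source (requires scikit-learn): calling the solver directly on a random system. In a model, it would instead be passed as the solver argument of a nengo.Connection.

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(500, 50)  # e.g. neuron activities at evaluation points
Y = rng.rand(500)
X, info = LstsqL1(l1=1e-4, l2=1e-6)(A, Y)
print(np.mean(X == 0))  # fraction of exactly-zero coefficients (sparsity)
print(info["rmses"], info["time"])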
Example #7
File: neurons.py  Project: qitsweauca/nengo
class LIFRate(NeuronType):
    """Non-spiking version of the leaky integrate-and-fire (LIF) neuron model.

    Parameters
    ----------
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    amplitude : float
        Scaling factor on the neuron output. Corresponds to the relative
        amplitude of the output spikes of the neuron.
    """

    probeable = ('rates', )

    tau_rc = NumberParam('tau_rc', low=0, low_open=True)
    tau_ref = NumberParam('tau_ref', low=0)
    amplitude = NumberParam('amplitude', low=0, low_open=True)

    def __init__(self, tau_rc=0.02, tau_ref=0.002, amplitude=1):
        super(LIFRate, self).__init__()
        self.tau_rc = tau_rc
        self.tau_ref = tau_ref
        self.amplitude = amplitude

    @property
    def _argreprs(self):
        args = []
        if self.tau_rc != 0.02:
            args.append("tau_rc=%s" % self.tau_rc)
        if self.tau_ref != 0.002:
            args.append("tau_ref=%s" % self.tau_ref)
        return args

    def gain_bias(self, max_rates, intercepts):
        """Analytically determine gain, bias."""
        max_rates = np.array(max_rates, dtype=float, copy=False, ndmin=1)
        intercepts = np.array(intercepts, dtype=float, copy=False, ndmin=1)

        inv_tau_ref = 1. / self.tau_ref if self.tau_ref > 0 else np.inf
        if np.any(max_rates > inv_tau_ref):
            raise ValidationError("Max rates must be below the inverse "
                                  "refractory period (%0.3f)" % inv_tau_ref,
                                  attr='max_rates',
                                  obj=self)

        x = 1.0 / (1 - np.exp(
            (self.tau_ref - (1.0 / max_rates)) / self.tau_rc))
        gain = (1 - x) / (intercepts - 1.0)
        bias = 1 - gain * intercepts
        return gain, bias

    def max_rates_intercepts(self, gain, bias):
        """Compute the inverse of gain_bias."""
        intercepts = (1 - bias) / gain
        max_rates = 1.0 / (self.tau_ref -
                           self.tau_rc * np.log1p(1.0 /
                                                  (gain *
                                                   (intercepts - 1) - 1)))
        if not np.all(np.isfinite(max_rates)):
            warnings.warn("Non-finite values detected in `max_rates`; this "
                          "probably means that `gain` was too small.")
        return max_rates, intercepts

    def rates(self, x, gain, bias):
        """Always use LIFRate to determine rates."""
        J = self.current(x, gain, bias)
        out = np.zeros_like(J)
        # Use LIFRate's step_math explicitly to ensure rate approximation
        LIFRate.step_math(self, dt=1, J=J, output=out)
        return out

    def step_math(self, dt, J, output):
        """Implement the LIFRate nonlinearity."""
        j = J - 1
        output[:] = 0  # faster than output[j <= 0] = 0
        output[j > 0] = self.amplitude / (
            self.tau_ref + self.tau_rc * np.log1p(1. / j[j > 0]))
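A quick numeric check of the math above (a sketch, assuming numpy as np and the NeuronType.current helper from nengo): gain_bias is constructed so that a neuron fires at its requested maximum rate when dot(x, e) = 1 and goes silent at its intercept.

lif = LIFRate(tau_rc=0.02, tau_ref=0.002)
gain, bias = lif.gain_bias(max_rates=np.array([200.0, 300.0]),
                           intercepts=np.array([-0.5, 0.0]))
print(lif.rates(1.0, gain, bias))   # ~[200., 300.]
print(lif.rates(-0.5, gain, bias))  # ~[0., 0.]: at/below the intercepts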
Example #8
class Triangle(Synapse):
    """Triangular finite impulse response (FIR) synapse.

    This synapse has a triangular and finite impulse response. The length of
    the triangle is ``t`` seconds; thus the digital filter will have
    ``t / dt + 1`` taps.

    Parameters
    ----------
    t : float
        Length of the triangle, in seconds.

    Attributes
    ----------
    t : float
        Length of the triangle, in seconds.
    """

    t = NumberParam("t", low=0)

    def __init__(self, t, **kwargs):
        super().__init__(**kwargs)
        self.t = t

    def _get_coefficients(self, dt, dtype=None):
        dtype = rc.float_dtype if dtype is None else np.dtype(dtype)

        n_taps = int(np.round(self.t / float(dt))) + 1
        num = np.arange(n_taps, 0, -1, dtype=dtype)
        num /= num.sum()

        # Minimal multiply implementation finds the difference between
        # coefficients and subtracts a scaled signal at each time step.
        n0, ndiff = num[0], num[-1]

        return n_taps, n0, ndiff

    def make_state(self, shape_in, shape_out, dt, dtype=None, y0=0):
        assert shape_in == shape_out
        dtype = rc.float_dtype if dtype is None else np.dtype(dtype)

        n_taps, _, ndiff = self._get_coefficients(dt, dtype=dtype)
        Y = np.zeros(shape_out, dtype=dtype)
        X = np.zeros((n_taps, ) + shape_out, dtype=dtype)
        Xi = np.zeros(1, dtype=dtype)  # counter for X position

        if y0 != 0 and len(X) > 0:
            y0 = np.array(y0, copy=False, ndmin=1)
            X[:] = ndiff * y0[None, ...]
            Y[:] = y0

        return {"Y": Y, "X": X, "Xi": Xi}

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == shape_out
        assert state is not None

        Y, X, Xi = state["Y"], state["X"], state["Xi"]
        n_taps, n0, ndiff = self._get_coefficients(dt, dtype=Y.dtype)
        assert len(X) == n_taps

        def step_triangle(t, signal):
            Y[...] += n0 * signal
            Y[...] -= X.sum(axis=0)
            Xi[:] = (Xi + 1) % len(X)
            X[int(Xi.item())] = ndiff * signal
            return Y

        return step_triangle
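A filtering sketch, assuming standard nengo; the Synapse base class provides filt, which applies the synapse to a sampled signal.

import numpy as np

syn = Triangle(t=0.01)
x = np.random.randn(1000)  # 1 s of noise sampled at dt = 0.001
y = syn.filt(x, dt=0.001)  # smoothed by an 11-tap triangular FIR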
Example #9
class LIFRate(NeuronType):
    """Non-spiking version of the leaky integrate-and-fire (LIF) neuron model.

    Parameters
    ----------
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    amplitude : float
        Scaling factor on the neuron output. Corresponds to the relative
        amplitude of the output spikes of the neuron.
    initial_state : {str: Distribution or array_like}
        Mapping from state variable names to their desired initial value.
        These values will override the defaults set in the class's state attribute.
    """

    state = {"rates": Choice([0])}
    negative = False

    tau_rc = NumberParam("tau_rc", low=0, low_open=True)
    tau_ref = NumberParam("tau_ref", low=0)
    amplitude = NumberParam("amplitude", low=0, low_open=True)

    def __init__(self, tau_rc=0.02, tau_ref=0.002, amplitude=1, initial_state=None):
        super().__init__(initial_state)
        self.tau_rc = tau_rc
        self.tau_ref = tau_ref
        self.amplitude = amplitude

    def gain_bias(self, max_rates, intercepts):
        """Analytically determine gain, bias."""
        max_rates = np.array(max_rates, dtype=float, copy=False, ndmin=1)
        intercepts = np.array(intercepts, dtype=float, copy=False, ndmin=1)

        inv_tau_ref = 1.0 / self.tau_ref if self.tau_ref > 0 else np.inf
        if not np.all(max_rates < inv_tau_ref):
            raise ValidationError(
                "Max rates must be below the inverse "
                "refractory period (%0.3f)" % inv_tau_ref,
                attr="max_rates",
                obj=self,
            )

        x = 1.0 / (1 - np.exp((self.tau_ref - (1.0 / max_rates)) / self.tau_rc))
        gain = (1 - x) / (intercepts - 1.0)
        bias = 1 - gain * intercepts
        return gain, bias

    def max_rates_intercepts(self, gain, bias):
        """Compute the inverse of gain_bias."""
        intercepts = (1 - bias) / gain
        max_rates = 1.0 / (
            self.tau_ref - self.tau_rc * np.log1p(1.0 / (gain * (intercepts - 1) - 1))
        )
        if not np.all(np.isfinite(max_rates)):
            warnings.warn(
                "Non-finite values detected in `max_rates`; this "
                "probably means that `gain` was too small."
            )
        return max_rates, intercepts

    def rates(self, x, gain, bias):
        """Always use LIFRate to determine rates."""
        J = self.current(x, gain, bias)
        out = np.zeros_like(J)
        # Use LIFRate's step explicitly to ensure rate approximation
        LIFRate.step(self, dt=1, J=J, rates=out)
        return out

    def step(self, dt, J, rates):
        """Implement the LIFRate nonlinearity."""
        j = J - 1
        rates[:] = 0  # faster than rates[j <= 0] = 0
        rates[j > 0] = self.amplitude / (
            self.tau_ref + self.tau_rc * np.log1p(1.0 / j[j > 0])
        )
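gain_bias and max_rates_intercepts are exact inverses, which is easy to sanity-check directly (a sketch, assuming numpy as np):

lif = LIFRate()
max_rates = np.array([100.0, 250.0])
intercepts = np.array([-0.3, 0.2])
gain, bias = lif.gain_bias(max_rates, intercepts)
mr, ic = lif.max_rates_intercepts(gain, bias)
assert np.allclose(mr, max_rates) and np.allclose(ic, intercepts)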
Example #10
class WilsonEuler(NeuronType):
    '''
    Wilson neuron model, integrated using the forward Euler method.

    Requires a small time step (dt < 0.0001 s) to avoid numerical errors.
    '''
    probeable = ('spikes', 'voltage', 'recovery', 'conductance', 'AP')
    threshold = NumberParam('threshold')
    tau_V = NumberParam('tau_V')
    tau_R = NumberParam('tau_R')
    tau_H = NumberParam('tau_H')

    _v0 = -0.754  # initial voltage
    _r0 = 0.279  # initial recovery
    _maxJ = 2.0  # clip input current at this maximum to avoid catastrophic shutdown

    def __init__(self,
                 threshold=-0.20,
                 tau_V=0.00097,
                 tau_R=0.0056,
                 tau_H=0.0990):
        super(WilsonEuler, self).__init__()
        self.threshold = threshold
        self.tau_V = tau_V
        self.tau_R = tau_R
        self.tau_H = tau_H

    @property
    def _argreprs(self):
        args = []

        def add(attr, default):
            if getattr(self, attr) != default:
                args.append("%s=%s" % (attr, getattr(self, attr)))

        add("threshold", -0.20)
        add("tau_V", 0.00097)
        add("tau_R", 0.0056)
        add("tau_H", 0.0990)
        return args

    def gain_bias(self, max_rates, intercepts):
        max_rates = np.array(max_rates, dtype=float, copy=False, ndmin=1)
        intercepts = np.array(intercepts, dtype=float, copy=False, ndmin=1)
        J_steps = 201  # Odd number so that 0 is a sample
        max_rate = max_rates.max()
        # Find range of J that will achieve max rates (assume monotonic)
        J_threshold = None
        J_max = None
        Jr = 1.0
        for _ in range(10):
            J = np.linspace(-Jr, Jr, J_steps)
            rate = self.rates(J, np.ones(J_steps), np.zeros(J_steps))
            #             print('J', J, 'euler rate', rate)
            if J_threshold is None and (rate <= 0).any():
                J_threshold = J[np.where(rate <= 0)[0][-1]]
            if J_max is None and (rate >= max_rate).any():
                J_max = J[np.where(rate >= max_rate)[0][0]]
            if J_threshold is not None and J_max is not None:
                break
            else:
                Jr *= 2
        else:
            if J_threshold is None:
                raise RuntimeError("Could not find firing threshold")
            if J_max is None:
                raise RuntimeError("Could not find max current")

        J = np.linspace(J_threshold, J_max, J_steps)
        rate = self.rates(J, np.ones(J_steps), np.zeros(J_steps))
        gain = np.zeros_like(max_rates)
        bias = np.zeros_like(max_rates)
        J_tops = np.interp(max_rates, rate, J)
        gain[:] = (J_threshold - J_tops) / (intercepts - 1)
        bias[:] = J_tops - gain
        return gain, bias

    def max_rates_intercepts(self, gain, bias):
        return np.zeros_like(gain), np.zeros_like(bias)

    def rates(self, x, gain, bias):
        """Estimates steady-state firing rate given gain and bias."""
        J = self.current(x, gain, bias)
        voltage = self._v0 * np.ones_like(J)
        recovery = self._r0 * np.ones_like(J)
        conductance = np.zeros_like(J)
        AP = np.zeros_like(J, dtype=bool)

        return settled_firingrate(self.step_math,
                                  J, [voltage, recovery, conductance, AP],
                                  dt=0.000025,
                                  settle_time=0.1,
                                  sim_time=1.0)

    def step_math(self, dt, J, spiked, V, R, H, AP):
        # must use dt<0.0001 to avoid numerical errors
        # if np.abs(J).any() >= 2.0:
        #     warnings.warn("input current exceeds failure point; clipping")
        #     J = J.clip(max=self._maxJ)
        dV = -(17.81 + 47.58 * V + 33.80 * np.square(V)) * (
            V - 0.48) - 26 * R * (V + 0.95) - 13 * H * (V + 0.95) + J
        dR = -R + 1.29 * V + 0.79 + 3.30 * np.square(V + 0.38)
        dH = -H + 11 * (V + 0.754) * (V + 0.69)
        V[:] = (V + dV * dt / self.tau_V).clip(-0.9, 0.3)
        R[:] = (R + dR * dt / self.tau_R)  # .clip(0.18, 0.42)
        H[:] = (H + dH * dt / self.tau_H)  # .clip(0, 0.23)
        spiked[:] = (V > self.threshold) & (~AP)
        spiked /= dt
        AP[:] = V > self.threshold
        return spiked, V, R, H, AP
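A hedged sketch of querying steady-state rates directly; it assumes numpy as np and that the repo's settled_firingrate helper (used by rates above) is importable.

neuron = WilsonEuler()
J = np.linspace(0.0, 1.5, 4)                  # input currents (gain 1, bias 0)
r = neuron.rates(J, np.ones(4), np.zeros(4))  # settled firing rate per current
print(r)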
Example #11
class WilsonRungeKutta(NeuronType):
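    '''Wilson neuron model, integrated with an adaptive Runge-Kutta (dopri5) solver.'''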

    probeable = ('spikes', 'voltage', 'recovery', 'conductance')
    threshold = NumberParam('threshold')
    tau_V = NumberParam('tau_V')
    tau_R = NumberParam('tau_R')
    tau_H = NumberParam('tau_H')

    _v0 = -0.754  # initial voltage
    _r0 = 0.279  # initial recovery
    _maxJ = 2.0  # clip input current at this maximum to avoid catastrophic shutdown

    def __init__(self,
                 threshold=-0.20,
                 tau_V=0.00097,
                 tau_R=0.0056,
                 tau_H=0.0990):
        super(WilsonRungeKutta, self).__init__()
        self.threshold = threshold
        self.tau_V = tau_V
        self.tau_R = tau_R
        self.tau_H = tau_H

        # TODO(arvoelke): Try replacing this solver with something like
        # http://www2.gsu.edu/~matrhc/PyDSTool.htm
        # The main consideration is that we need a callback to count spikes
        from scipy.integrate import ode
        self.solver = ode(self._ode_fun).set_integrator(
            'dopri5', first_step=0.000025, nsteps=100, rtol=1e-2,
            atol=1e-3)  # runge-kutta method of order (4)5

    def rates(self, x, gain, bias):
        """Estimates steady-state firing rate given gain and bias."""
        J = self.current(x, gain, bias)
        voltage = self._v0 * np.ones_like(J)
        recovery = self._r0 * np.ones_like(J)
        conductance = np.zeros_like(J)

        return settled_firingrate(self.step_math,
                                  J, [voltage, recovery, conductance],
                                  dt=0.001,
                                  settle_time=0.1,
                                  sim_time=1.0)

    def _ode_fun(self, dummy_t, y, J):  # first argument to scipy.integrate.ode
        V, R, H = np.split(y, 3)
        dV = (-(17.81 + 47.58 * V + 33.80 * np.square(V)) * (V - 0.48) -
              26 * R * (V + 0.95) - 13 * H * (V + 0.95) + J)
        dR = -R + 1.29 * V + 0.79 + 3.30 * np.square(V + 0.38)
        dH = -H + 11 * (V + 0.754) * (V + 0.69)
        return np.concatenate(
            (dV / self.tau_V, dR / self.tau_R, dH / self.tau_H))

    def step_math(self, dt, J, spiked, V, R, H):
        # It's a little silly to be reinitializing the solver on
        # every time-step, but any other ways that I could think of would
        # violate the nengo builder's assumption that the neuron's state is
        # encapsulated by the signals in SimNeurons
        self.solver.set_initial_value(np.concatenate((V, R, H)))
        self.solver.set_f_params(J.clip(max=self._maxJ))

        spiked[:] = 0
        AP = V > self.threshold

        def spike_detector(dummy_t, y):  # callback for each sub-step
            V_t = y[:len(V)] > self.threshold
            spiked[:] += V_t & (~AP)  # note the "+="
            AP[:] = V_t

        self.solver.set_solout(spike_detector)

        V[:], R[:], H[:] = np.split(self.solver.integrate(self.solver.t + dt),
                                    3)
        if not self.solver.successful():
            raise ValueError("ODE solver failed with status code: %d" %
                             (self.solver.get_return_code()))
        spiked[:] /= dt

        return spiked, V, R, H
Example #12
class AdaptiveLIFT(LIF):
    ''' Aaron Voelker, https://github.com/nengo/nengo/issues/1423'''

    probeable = ('spikes', 'voltage', 'refractory_time', 'threshold')

    min_voltage = NumberParam('min_voltage', high=0)
    tau_adapt = NumberParam('tau_adapt', low=0)
    inc_adapt = NumberParam('inc_adapt', low=0)

    def __init__(self,
                 tau_rc=0.02,
                 tau_ref=0.002,
                 min_voltage=0,
                 amplitude=1,
                 tau_adapt=0.1,
                 inc_adapt=0.1):
        super(AdaptiveLIFT, self).__init__(tau_rc=tau_rc,
                                           tau_ref=tau_ref,
                                           min_voltage=min_voltage,
                                           amplitude=amplitude)
        self.tau_adapt = tau_adapt
        self.inc_adapt = inc_adapt

    def rates(self, x, gain, bias):
        """Estimates steady-state firing rate given gain and bias."""
        J = self.current(x, gain, bias)
        voltage = np.zeros_like(gain)
        refractory_time = np.zeros_like(gain)
        threshold = np.ones_like(gain)

        return settled_firingrate(self.step_math,
                                  J, [voltage, refractory_time, threshold],
                                  dt=0.001,
                                  settle_time=0.3,
                                  sim_time=1.0)

    def gain_bias(self, max_rates, intercepts):
        return NeuronType.gain_bias(self, max_rates, intercepts)

    def max_rates_intercepts(self, gain, bias):
        return NeuronType.max_rates_intercepts(self, gain, bias)

    def step_math(self, dt, J, spiked, voltage, refractory_time, threshold):
        # reduce all refractory times by dt
        refractory_time -= dt

        # compute effective dt for each neuron, based on remaining time.
        # note that refractory times that have completed midway into this
        # timestep will be given a partial timestep, and moreover these will
        # be subtracted to zero at the next timestep (or reset by a spike)
        delta_t = (dt - refractory_time).clip(0, dt)

        # update voltage using discretized lowpass filter
        # since v(t) = v(0) + (J - v(0))*(1 - exp(-t/tau)) assuming
        # J is constant over the interval [t, t + dt)
        voltage -= (J - voltage) * np.expm1(-delta_t / self.tau_rc)

        # determine which neurons spiked (set them to 1/dt, else 0)
        spiked_mask = voltage > threshold
        spiked[:] = spiked_mask * (self.amplitude / dt)

        # set v(0) = threshold and solve for t to compute the spike time
        # TODO: not sure if this mask is the right way to handle log domain errors
        threshold_spiked = threshold[spiked_mask]
        m = (voltage[spiked_mask] - threshold_spiked) / (J[spiked_mask] -
                                                         threshold_spiked)
        t_spike = np.zeros_like(m)
        t_spike[m < 1] = dt + self.tau_rc * np.log1p(-m[m < 1])

        # update threshold using discretized lowpass filter
        # applied to the input 1 + spiked * inc_adapt
        threshold -= ((1 + self.inc_adapt * spiked - threshold) *
                      np.expm1(-dt / self.tau_adapt))

        # set spiked voltages to zero, refractory times to tau_ref, and
        # rectify negative voltages to a floor of min_voltage
        voltage[voltage < self.min_voltage] = self.min_voltage
        voltage[spiked_mask] = 0
        refractory_time[spiked_mask] = self.tau_ref + t_spike
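A hedged sketch of the adaptive rate response: repeated spiking raises the threshold, so the settled rates here come out lower than for a plain LIF (assumes numpy as np and the repo's settled_firingrate helper).

neuron = AdaptiveLIFT(tau_adapt=0.1, inc_adapt=0.1)
J = np.linspace(0.5, 2.0, 4)                  # input currents (gain 1, bias 0)
r = neuron.rates(J, np.ones(4), np.zeros(4))  # adapted steady-state rates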
Example #13
class Oja(LearningRuleType):
    """Oja learning rule.

    Modifies connection weights according to the Hebbian Oja rule, which
    augments typical Hebbian coactivity with a "forgetting" term that is
    proportional to the weight of the connection and the square of the
    postsynaptic activity.

    Parameters
    ----------
    pre_tau : float, optional (Default: 0.005)
        Filter constant on activities of neurons in pre population.
    post_tau : float, optional (Default: None)
        Filter constant on activities of neurons in post population.
        If None, post_tau will be the same as pre_tau.
    beta : float, optional (Default: 1.0)
        A scalar weight on the forgetting term.
    learning_rate : float, optional (Default: 1e-6)
        A scalar indicating the rate at which weights will be adjusted.

    Attributes
    ----------
    beta : float
        A scalar weight on the forgetting term.
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_tau : float
        Filter constant on activities of neurons in post population.
    pre_tau : float
        Filter constant on activities of neurons in pre population.
    """

    error_type = 'none'
    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')

    pre_tau = NumberParam('pre_tau', low=0, low_open=True)
    post_tau = NumberParam('post_tau', low=0, low_open=True)
    beta = NumberParam('beta', low=0)

    def __init__(self,
                 pre_tau=0.005,
                 post_tau=None,
                 beta=1.0,
                 learning_rate=1e-6):
        self.pre_tau = pre_tau
        self.post_tau = post_tau if post_tau is not None else pre_tau
        self.beta = beta
        super(Oja, self).__init__(learning_rate)

    @property
    def _argreprs(self):
        args = []
        if self.pre_tau != 0.005:
            args.append("pre_tau=%f" % self.pre_tau)
        if self.post_tau != self.pre_tau:
            args.append("post_tau=%f" % self.post_tau)
        if self.beta != 1.0:
            args.append("beta=%f" % self.beta)
        if self.learning_rate != 1e-6:
            args.append("learning_rate=%g" % self.learning_rate)
        return args
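A usage sketch, assuming standard nengo. Oja has error_type 'none' and modifies full weight matrices, so it is applied to a neuron-to-neuron connection with an explicit transform:

import nengo
import numpy as np

with nengo.Network() as model:
    pre = nengo.Ensemble(50, dimensions=1)
    post = nengo.Ensemble(50, dimensions=1)
    conn = nengo.Connection(pre.neurons, post.neurons,
                            transform=np.random.rand(50, 50) * 1e-3,
                            learning_rule_type=Oja(learning_rate=1e-6))
    delta_probe = nengo.Probe(conn.learning_rule, 'delta')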
Example #14
class BCM(LearningRuleType):
    """Bienenstock-Cooper-Munroe learning rule.

    Modifies connection weights as a function of the presynaptic activity
    and the difference between the postsynaptic activity and the average
    postsynaptic activity.

    Parameters
    ----------
    theta_tau : float, optional (Default: 1.0)
        A scalar indicating the time constant for theta integration.
    pre_tau : float, optional (Default: 0.005)
        Filter constant on activities of neurons in pre population.
    post_tau : float, optional (Default: None)
        Filter constant on activities of neurons in post population.
        If None, post_tau will be the same as pre_tau.
    learning_rate : float, optional (Default: 1e-9)
        A scalar indicating the rate at which weights will be adjusted.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_tau : float
        Filter constant on activities of neurons in post population.
    pre_tau : float
        Filter constant on activities of neurons in pre population.
    theta_tau : float
        A scalar indicating the time constant for theta integration.
    """

    error_type = 'none'
    modifies = 'weights'
    probeable = ('theta', 'pre_filtered', 'post_filtered', 'delta')

    pre_tau = NumberParam('pre_tau', low=0, low_open=True)
    post_tau = NumberParam('post_tau', low=0, low_open=True)
    theta_tau = NumberParam('theta_tau', low=0, low_open=True)

    def __init__(self,
                 pre_tau=0.005,
                 post_tau=None,
                 theta_tau=1.0,
                 learning_rate=1e-9):
        self.theta_tau = theta_tau
        self.pre_tau = pre_tau
        self.post_tau = post_tau if post_tau is not None else pre_tau
        super(BCM, self).__init__(learning_rate)

    @property
    def _argreprs(self):
        args = []
        if self.pre_tau != 0.005:
            args.append("pre_tau=%f" % self.pre_tau)
        if self.post_tau != self.pre_tau:
            args.append("post_tau=%f" % self.post_tau)
        if self.theta_tau != 1.0:
            args.append("theta_tau=%f" % self.theta_tau)
        if self.learning_rate != 1e-9:
            args.append("learning_rate=%g" % self.learning_rate)
        return args
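BCM is wired up the same way as Oja above; its running average of postsynaptic activity can be probed as 'theta' (a sketch assuming standard nengo):

import nengo
import numpy as np

with nengo.Network() as model:
    pre = nengo.Ensemble(40, dimensions=1)
    post = nengo.Ensemble(40, dimensions=1)
    conn = nengo.Connection(pre.neurons, post.neurons,
                            transform=np.random.rand(40, 40) * 1e-3,
                            learning_rule_type=BCM(learning_rate=1e-9))
    theta_probe = nengo.Probe(conn.learning_rule, 'theta', synapse=0.01)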
Example #15
class LearningRuleType(FrozenObject):
    """Base class for all learning rule objects.

    To use a learning rule, pass it as a ``learning_rule_type`` keyword
    argument to the `~nengo.Connection` on which you want to do learning.

    Each learning rule exposes two important pieces of metadata that the
    builder uses to determine what information should be stored.

    The ``size_in`` is the dimensionality of the incoming error signal. It
    can either take an integer or one of the following string values:

    * ``'pre'``: vector error signal in pre-object space
    * ``'post'``: vector error signal in post-object space
    * ``'mid'``: vector error signal in the ``conn.size_mid`` space
    * ``'pre_state'``: vector error signal in pre-synaptic ensemble space
    * ``'post_state'``: vector error signal in post-synaptic ensemble space

    The difference between ``'post_state'`` and ``'post'`` is that with the
    former, if a ``Neurons`` object is passed, it will use the dimensionality
    of the corresponding ``Ensemble``, whereas the latter simply uses the
    ``post`` object ``size_in``. Similarly with ``'pre_state'`` and ``'pre'``.

    The ``modifies`` attribute denotes the signal targeted by the rule.
    Options are:

    * ``'encoders'``
    * ``'decoders'``
    * ``'weights'``

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-6)
        A scalar indicating the rate at which ``modifies`` will be adjusted.
    size_in : int, str, optional (Default: 0)
        Dimensionality of the error signal (see above).

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which ``modifies`` will be adjusted.
    size_in : int, str
        Dimensionality of the error signal.
    modifies : str
        The signal targeted by the learning rule.
    """

    modifies = None
    probeable = ()

    learning_rate = NumberParam('learning_rate', low=0)
    size_in = LearningRuleTypeSizeInParam('size_in', low=0)

    def __init__(self, learning_rate=1e-6, size_in=0):
        super(LearningRuleType, self).__init__()
        self.learning_rate = learning_rate
        self.size_in = size_in

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, ", ".join(self._argreprs))

    @property
    def _argreprs(self):
        return (["learning_rate=%g" % self.learning_rate]
                if self.learning_rate != 1e-6 else [])
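A skeleton of a custom rule built on this base class (a sketch only: in nengo, a new rule additionally needs build logic registered with the builder before it can simulate):

class MyRule(LearningRuleType):
    """Toy rule: modifies decoders and takes a post-space error signal."""

    modifies = 'decoders'
    probeable = ('delta',)

    def __init__(self, learning_rate=1e-6):
        super(MyRule, self).__init__(learning_rate, size_in='post')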
Example #16
class LIF(LIFRate):
    """Spiking version of the leaky integrate-and-fire (LIF) neuron model.

    Parameters
    ----------
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    min_voltage : float
        Minimum value for the membrane voltage. If ``-np.inf``, the voltage
        is never clipped.
    amplitude : float
        Scaling factor on the neuron output. Corresponds to the relative
        amplitude of the output spikes of the neuron.
    initial_state : {str: Distribution or array_like}
        Mapping from state variable names to their desired initial value.
        These values will override the defaults set in the class's state attribute.
    """

    state = {
        "spikes": Choice([0]),
        "voltage": Uniform(low=0, high=1),
        "refractory_time": Choice([0]),
    }

    min_voltage = NumberParam("min_voltage", high=0)

    def __init__(
        self, tau_rc=0.02, tau_ref=0.002, min_voltage=0, amplitude=1, initial_state=None
    ):
        super().__init__(
            tau_rc=tau_rc,
            tau_ref=tau_ref,
            amplitude=amplitude,
            initial_state=initial_state,
        )
        self.min_voltage = min_voltage

    def step(self, dt, J, spikes, voltage, refractory_time):
        # reduce all refractory times by dt
        refractory_time -= dt

        # compute effective dt for each neuron, based on remaining time.
        # note that refractory times that have completed midway into this
        # timestep will be given a partial timestep, and moreover these will
        # be subtracted to zero at the next timestep (or reset by a spike)
        delta_t = (dt - refractory_time).clip(0, dt)

        # update voltage using discretized lowpass filter
        # since v(t) = v(0) + (J - v(0))*(1 - exp(-t/tau)) assuming
        # J is constant over the interval [t, t + dt)
        voltage -= (J - voltage) * np.expm1(-delta_t / self.tau_rc)

        # determine which neurons spiked (set them to 1/dt, else 0)
        spiked_mask = voltage > 1
        spikes[:] = spiked_mask * (self.amplitude / dt)

        # set v(0) = 1 and solve for t to compute the spike time
        t_spike = dt + self.tau_rc * np.log1p(
            -(voltage[spiked_mask] - 1) / (J[spiked_mask] - 1)
        )

        # set spiked voltages to zero, refractory times to tau_ref, and
        # rectify negative voltages to a floor of min_voltage
        voltage[voltage < self.min_voltage] = self.min_voltage
        voltage[spiked_mask] = 0
        refractory_time[spiked_mask] = self.tau_ref + t_spike
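A usage sketch, assuming standard nengo; LIF is the default ensemble neuron type, and spike output can be probed from the ensemble's neurons.

import nengo

with nengo.Network() as model:
    ens = nengo.Ensemble(10, dimensions=1,
                         neuron_type=LIF(tau_rc=0.02, tau_ref=0.002))
    spikes = nengo.Probe(ens.neurons)  # spikes scaled by amplitude / dt

with nengo.Simulator(model) as sim:
    sim.run(0.1)
print(sim.data[spikes].shape)  # (100, 10) at the default dt = 0.001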
Example #17
class Process(FrozenObject):
    """A general system with input, output, and state.

    For more details on how to use processes and make
    custom process subclasses, see :doc:`examples/processes`.

    Parameters
    ----------
    default_size_in : int (Default: 0)
        Sets the default size in for nodes using this process.
    default_size_out : int (Default: 1)
        Sets the default size out for nodes running this process. Also,
        if ``d`` is not specified in `~.Process.run` or `~.Process.run_steps`,
        this will be used.
    default_dt : float (Default: 0.001 (1 millisecond))
        If ``dt`` is not specified in `~.Process.run`, `~.Process.run_steps`,
        `~.Process.ntrange`, or `~.Process.trange`, this will be used.
    seed : int, optional (Default: None)
        Random number seed. Ensures random factors will be the same each run.

    Attributes
    ----------
    default_dt : float
        If ``dt`` is not specified in `~.Process.run`, `~.Process.run_steps`,
        `~.Process.ntrange`, or `~.Process.trange`, this will be used.
    default_size_in : int
        The default size in for nodes using this process.
    default_size_out : int
        The default size out for nodes running this process. Also, if ``d`` is
        not specified in `~.Process.run` or `~.Process.run_steps`,
        this will be used.
    seed : int or None
        Random number seed. Ensures random factors will be the same each run.
    """

    default_size_in = IntParam('default_size_in', low=0)
    default_size_out = IntParam('default_size_out', low=0)
    default_dt = NumberParam('default_dt', low=0, low_open=True)
    seed = IntParam('seed', low=0, high=maxint, optional=True)

    def __init__(self, default_size_in=0, default_size_out=1,
                 default_dt=0.001, seed=None):
        super(Process, self).__init__()
        self.default_size_in = default_size_in
        self.default_size_out = default_size_out
        self.default_dt = default_dt
        self.seed = seed

    def apply(self, x, d=None, dt=None, rng=np.random, copy=True, **kwargs):
        """Run process on a given input.

        Keyword arguments that do not appear in the parameter list below
        will be passed to the ``make_step`` function of this process.

        Parameters
        ----------
        x : ndarray
            The input signal given to the process.
        d : int, optional (Default: None)
            Output dimensionality. If None, ``default_size_out`` will be used.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        rng : `numpy.random.RandomState` (Default: ``numpy.random``)
            Random number generator used for stochastic processes.
        copy : bool, optional (Default: True)
            If True, a new output array will be created for output.
            If False, the input signal ``x`` will be overwritten.
        """
        shape_in = as_shape(np.asarray(x[0]).shape, min_dim=1)
        shape_out = as_shape(self.default_size_out if d is None else d)
        dt = self.default_dt if dt is None else dt
        rng = self.get_rng(rng)
        step = self.make_step(shape_in, shape_out, dt, rng, **kwargs)
        output = np.zeros((len(x),) + shape_out) if copy else x
        for i, xi in enumerate(x):
            output[i] = step((i+1) * dt, xi)
        return output

    def get_rng(self, rng):
        """Get a properly seeded independent RNG for the process step.

        Parameters
        ----------
        rng : `numpy.random.RandomState`
            The parent random number generator to use if the seed is not set.
        """
        seed = rng.randint(maxint) if self.seed is None else self.seed
        return np.random.RandomState(seed)

    def make_step(self, shape_in, shape_out, dt, rng):
        """Create function that advances the process forward one time step.

        This must be implemented by all custom processes.

        Parameters
        ----------
        shape_in : tuple
            The shape of the input signal.
        shape_out : tuple
            The shape of the output signal.
        dt : float
            The simulation timestep.
        rng : `numpy.random.RandomState`
            A random number generator.
        """
        raise NotImplementedError("Process must implement `make_step` method.")

    def run(self, t, d=None, dt=None, rng=np.random, **kwargs):
        """Run process without input for given length of time.

        Keyword arguments that do not appear in the parameter list below
        will be passed to the ``make_step`` function of this process.

        Parameters
        ----------
        t : float
            The length of time to run.
        d : int, optional (Default: None)
            Output dimensionality. If None, ``default_size_out`` will be used.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        rng : `numpy.random.RandomState` (Default: ``numpy.random``)
            Random number generator used for stochastic processes.
        """
        dt = self.default_dt if dt is None else dt
        n_steps = int(np.round(float(t) / dt))
        return self.run_steps(n_steps, d=d, dt=dt, rng=rng, **kwargs)

    def run_steps(self, n_steps, d=None, dt=None, rng=np.random, **kwargs):
        """Run process without input for given number of steps.

        Keyword arguments that do not appear in the parameter list below
        will be passed to the ``make_step`` function of this process.

        Parameters
        ----------
        n_steps : int
            The number of steps to run.
        d : int, optional (Default: None)
            Output dimensionality. If None, ``default_size_out`` will be used.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        rng : `numpy.random.RandomState` (Default: ``numpy.random``)
            Random number generator used for stochastic processes.
        """
        shape_in = as_shape(0)
        shape_out = as_shape(self.default_size_out if d is None else d)
        dt = self.default_dt if dt is None else dt
        rng = self.get_rng(rng)
        step = self.make_step(shape_in, shape_out, dt, rng, **kwargs)
        output = np.zeros((n_steps,) + shape_out)
        for i in range(n_steps):
            output[i] = step((i+1) * dt)
        return output

    def ntrange(self, n_steps, dt=None):
        """Create time points corresponding to a given number of steps.

        Parameters
        ----------
        n_steps : int
            The given number of steps.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        """
        dt = self.default_dt if dt is None else dt
        return dt * np.arange(1, n_steps + 1)

    def trange(self, t, dt=None):
        """Create time points corresponding to a given length of time.

        Parameters
        ----------
        t : float
            The given length of time.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        """
        dt = self.default_dt if dt is None else dt
        n_steps = int(np.round(float(t) / dt))
        return self.ntrange(n_steps, dt=dt)
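To illustrate the make_step contract described above, here is a hypothetical minimal subclass (TimeProcess is an illustrative name, not part of the source): make_step returns a callable that maps the current step time to one output sample.

import numpy as np

class TimeProcess(Process):
    """Illustrative process whose output is the current simulation time."""

    def make_step(self, shape_in, shape_out, dt, rng):
        def step(t):
            # run_steps calls this with t = (i + 1) * dt
            return t * np.ones(shape_out)

        return step

# TimeProcess().run(0.005) uses default_dt=0.001 and default_size_out=1,
# so it returns [[0.001], [0.002], [0.003], [0.004], [0.005]].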
Example #18
class AdaptiveLIF(LIF):
    """Adaptive spiking version of the LIF neuron model.

    Works like the LIF model, except with an adaptation state ``n``, which is
    subtracted from the input current. Its dynamics are::

        tau_n dn/dt = -n

    where ``n`` is incremented by ``inc_n`` when the neuron spikes.

    Parameters
    ----------
    tau_n : float
        Adaptation time constant. Affects how quickly the adaptation state
        decays to zero in the absence of spikes (larger = slower decay).
    inc_n : float
        Adaptation increment. How much the adaptation state is increased after
        each spike.
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    min_voltage : float
        Minimum value for the membrane voltage. If ``-np.inf``, the voltage
        is never clipped.
    amplitude : float
        Scaling factor on the neuron output. Corresponds to the relative
        amplitude of the output spikes of the neuron.
    initial_state : {str: Distribution or array_like}
        Mapping from state variables names to their desired initial value.
        These values will override the defaults set in the class's state attribute.

    References
    ----------
    .. [1] Camera, Giancarlo La, et al. "Minimal models of adapted neuronal
       response to in Vivo-Like input currents." Neural computation
       16.10 (2004): 2101-2124.
    """

    state = {
        "spikes": Choice([0]),
        "voltage": Uniform(low=0, high=1),
        "refractory_time": Choice([0]),
        "adaptation": Choice([0]),
    }

    tau_n = NumberParam("tau_n", low=0, low_open=True)
    inc_n = NumberParam("inc_n", low=0)

    def __init__(
        self,
        tau_n=1,
        inc_n=0.01,
        tau_rc=0.02,
        tau_ref=0.002,
        min_voltage=0,
        amplitude=1,
        initial_state=None,
    ):
        super().__init__(
            tau_rc=tau_rc,
            tau_ref=tau_ref,
            min_voltage=min_voltage,
            amplitude=amplitude,
            initial_state=initial_state,
        )
        self.tau_n = tau_n
        self.inc_n = inc_n

    def step(self, dt, J, spikes, voltage, refractory_time, adaptation):
        """Implement the AdaptiveLIF nonlinearity."""
        n = adaptation
        super().step(dt, J - n, spikes, voltage, refractory_time)
        n += (dt / self.tau_n) * (self.inc_n * spikes - n)
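A short usage sketch, assuming this class is exposed as nengo.AdaptiveLIF: the extra "adaptation" state can be probed like any other state variable.

import nengo

with nengo.Network() as model:
    ens = nengo.Ensemble(
        10, 1, neuron_type=nengo.AdaptiveLIF(tau_n=1, inc_n=0.01))
    adapt_probe = nengo.Probe(ens.neurons, "adaptation")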
Example #19
class LstsqDrop(Solver):
    """Find sparser decoders/weights by dropping small values.

    This solver first solves for coefficients (decoders/weights) with
    L2 regularization, drops those nearest to zero, and then retrains the
    remaining nonzero coefficients.
    """

    compositional = False

    drop = NumberParam("drop", low=0, high=1)
    solver1 = SolverParam("solver1")
    solver2 = SolverParam("solver2")

    def __init__(
            self,
            weights=False,
            drop=0.25,
            solver1=LstsqL2(reg=0.001),
            solver2=LstsqL2(reg=0.1),
    ):
        """
        Parameters
        ----------
        weights : bool, optional
            If False, solve for decoders. If True, solve for weights.
        drop : float, optional
            Fraction of decoders or weights to set to zero.
        solver1 : Solver, optional
            Solver for finding the initial decoders.
        solver2 : Solver, optional
            Used for re-solving for the decoders after dropout.

        Attributes
        ----------
        drop : float
            Fraction of decoders or weights to set to zero.
        solver1 : Solver
            Solver for finding the initial decoders.
        solver2 : Solver
            Used for re-solving for the decoders after dropout.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        super().__init__(weights=weights)
        self.drop = drop
        self.solver1 = solver1
        self.solver2 = solver2

    def __call__(self, A, Y, rng=np.random):
        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)

        # solve for coefficients using standard solver
        X, info0 = self.solver1(A, Y, rng=rng)

        # drop weights close to zero, based on `drop` ratio
        Xabs = np.sort(np.abs(X.flat))
        threshold = Xabs[int(np.round(self.drop * Xabs.size))]
        X[np.abs(X) < threshold] = 0

        # retrain nonzero weights
        info1 = None  # guard in case every column was zeroed out above
        for i in range(X.shape[1]):
            nonzero = X[:, i] != 0
            if nonzero.sum() > 0:
                X[nonzero, i], info1 = self.solver2(A[:, nonzero],
                                                    Y[:, i],
                                                    rng=rng)

        t = time.time() - tstart
        info = {
            "rmses": rmses(A, X, Y),
            "info0": info0,
            "info1": info1,
            "time": t
        }
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
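An illustrative call with random data, assuming the module's helpers used above (LstsqL2, format_system, rmses) are in scope: A holds neuron activities, Y the target values.

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(100, 20)  # (n_eval_points, n_neurons) activities
Y = rng.rand(100, 2)   # (n_eval_points, d) target values
X, info = LstsqDrop(drop=0.5)(A, Y, rng=rng)
print(X.shape)         # (20, 2); about half of the entries are zero
print(info["rmses"])   # per-column RMS errors after retraining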
Example #20
class Izhikevich(NeuronType):
    """Izhikevich neuron model.

    This implementation is based on the original paper [1]_;
    however, we rename some variables for clarity.
    What was originally 'v' we term 'voltage', which represents the membrane
    potential of each neuron. What was originally 'u' we term 'recovery',
    which represents membrane recovery, "which accounts for the activation
    of K+ ionic currents and inactivation of Na+ ionic currents."
    The 'a', 'b', 'c', and 'd' parameters are also renamed
    (see the parameters below).

    We use default values that correspond to regular spiking ('RS') neurons.
    For other classes of neurons, set the parameters as follows.

    * Intrinsically bursting (IB): ``reset_voltage=-55, reset_recovery=4``
    * Chattering (CH): ``reset_voltage=-50, reset_recovery=2``
    * Fast spiking (FS): ``tau_recovery=0.1``
    * Low-threshold spiking (LTS): ``coupling=0.25``
    * Resonator (RZ): ``tau_recovery=0.1, coupling=0.26``

    Parameters
    ----------
    tau_recovery : float, optional
        (Originally 'a') Time scale of the recovery variable.
    coupling : float, optional
        (Originally 'b') How sensitive recovery is to subthreshold
        fluctuations of voltage.
    reset_voltage : float, optional
        (Originally 'c') The voltage to reset to after a spike, in millivolts.
    reset_recovery : float, optional
        (Originally 'd') The recovery value to reset to after a spike.
    initial_state : {str: Distribution or array_like}
        Mapping from state variables names to their desired initial value.
        These values will override the defaults set in the class's state attribute.

    References
    ----------
    .. [1] E. M. Izhikevich, "Simple model of spiking neurons."
       IEEE Transactions on Neural Networks, vol. 14, no. 6, pp. 1569-1572.
       (http://www.izhikevich.org/publications/spikes.pdf)
    """

    state = {
        "spikes": Choice([0]),
        "voltage": Uniform(low=0, high=1),
        "recovery": Choice([0]),
    }
    negative = False

    tau_recovery = NumberParam("tau_recovery", low=0, low_open=True)
    coupling = NumberParam("coupling", low=0)
    reset_voltage = NumberParam("reset_voltage")
    reset_recovery = NumberParam("reset_recovery")

    def __init__(
        self,
        tau_recovery=0.02,
        coupling=0.2,
        reset_voltage=-65.0,
        reset_recovery=8.0,
        initial_state=None,
    ):
        super().__init__(initial_state)
        self.tau_recovery = tau_recovery
        self.coupling = coupling
        self.reset_voltage = reset_voltage
        self.reset_recovery = reset_recovery

    def rates(self, x, gain, bias):
        """Estimates steady-state firing rate given gain and bias."""
        J = self.current(x, gain, bias)
        return settled_firingrate(
            self.step,
            J,
            state={
                "spikes": np.zeros_like(J),
                "voltage": np.zeros_like(J),
                "recovery": np.zeros_like(J),
            },
            settle_time=0.001,
            sim_time=1.0,
        )

    def step(self, dt, J, spikes, voltage, recovery):
        """Implement the Izhikevich nonlinearity."""
        # Numerical instability occurs for very low inputs.
        # We'll clip them to be greater than some value that was chosen by
        # looking at the simulations for many parameter sets.
        # A more principled minimum value would be better.
        J = np.maximum(-30.0, J)

        dV = (0.04 * voltage ** 2 + 5 * voltage + 140 - recovery + J) * 1000
        voltage[:] += dV * dt

        # We check for spikes and reset the voltage here rather than after,
        # which differs from the original implementation by Izhikevich.
        # However, calculating recovery for voltage values greater than
        # threshold can cause the system to blow up, which we want
        # to avoid at all costs.
        spikes[:] = (voltage >= 30) / dt
        voltage[spikes > 0] = self.reset_voltage

        dU = (self.tau_recovery * (self.coupling * voltage - recovery)) * 1000
        recovery[:] += dU * dt
        recovery[spikes > 0] = recovery[spikes > 0] + self.reset_recovery
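A brief usage sketch, assuming this class is exposed as nengo.Izhikevich: the presets listed above are selected by passing the corresponding parameters.

import nengo

with nengo.Network() as model:
    # fast-spiking (FS) preset from the list above
    fs = nengo.Ensemble(
        5, 1, neuron_type=nengo.Izhikevich(tau_recovery=0.1))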
Example #21
File: neurons.py Project: qitsweauca/nengo
class Izhikevich(NeuronType):
    """Izhikevich neuron model.

    This implementation is based on the original paper [1]_;
    however, we rename some variables for clarity.
    What was originally 'v' we term 'voltage', which represents the membrane
    potential of each neuron. What was originally 'u' we term 'recovery',
    which represents membrane recovery, "which accounts for the activation
    of K+ ionic currents and inactivation of Na+ ionic currents."
    The 'a', 'b', 'c', and 'd' parameters are also renamed
    (see the parameters below).

    We use default values that correspond to regular spiking ('RS') neurons.
    For other classes of neurons, set the parameters as follows.

    * Intrinsically bursting (IB): ``reset_voltage=-55, reset_recovery=4``
    * Chattering (CH): ``reset_voltage=-50, reset_recovery=2``
    * Fast spiking (FS): ``tau_recovery=0.1``
    * Low-threshold spiking (LTS): ``coupling=0.25``
    * Resonator (RZ): ``tau_recovery=0.1, coupling=0.26``

    Parameters
    ----------
    tau_recovery : float, optional (Default: 0.02)
        (Originally 'a') Time scale of the recovery variable.
    coupling : float, optional (Default: 0.2)
        (Originally 'b') How sensitive recovery is to subthreshold
        fluctuations of voltage.
    reset_voltage : float, optional (Default: -65.)
        (Originally 'c') The voltage to reset to after a spike, in millivolts.
    reset_recovery : float, optional (Default: 8.)
        (Originally 'd') The recovery value to reset to after a spike.

    References
    ----------
    .. [1] E. M. Izhikevich, "Simple model of spiking neurons."
       IEEE Transactions on Neural Networks, vol. 14, no. 6, pp. 1569-1572.
       (http://www.izhikevich.org/publications/spikes.pdf)
    """

    probeable = ('spikes', 'voltage', 'recovery')

    tau_recovery = NumberParam('tau_recovery', low=0, low_open=True)
    coupling = NumberParam('coupling', low=0)
    reset_voltage = NumberParam('reset_voltage')
    reset_recovery = NumberParam('reset_recovery')

    def __init__(self,
                 tau_recovery=0.02,
                 coupling=0.2,
                 reset_voltage=-65.,
                 reset_recovery=8.):
        super(Izhikevich, self).__init__()
        self.tau_recovery = tau_recovery
        self.coupling = coupling
        self.reset_voltage = reset_voltage
        self.reset_recovery = reset_recovery

    @property
    def _argreprs(self):
        args = []

        def add(attr, default):
            if getattr(self, attr) != default:
                args.append("%s=%s" % (attr, getattr(self, attr)))

        add("tau_recovery", 0.02)
        add("coupling", 0.2)
        add("reset_voltage", -65.)
        add("reset_recovery", 8.)
        return args

    def rates(self, x, gain, bias):
        """Estimates steady-state firing rate given gain and bias.

        Uses the `.settled_firingrate` helper function.
        """
        J = self.current(x, gain, bias)
        voltage = np.zeros_like(J)
        recovery = np.zeros_like(J)
        return settled_firingrate(self.step_math,
                                  J, [voltage, recovery],
                                  settle_time=0.001,
                                  sim_time=1.0)

    def step_math(self, dt, J, spiked, voltage, recovery):
        """Implement the Izhikevich nonlinearity."""
        # Numerical instability occurs for very low inputs.
        # We'll clip them to be greater than some value that was chosen by
        # looking at the simulations for many parameter sets.
        # A more principled minimum value would be better.
        J = np.maximum(-30., J)

        dV = (0.04 * voltage**2 + 5 * voltage + 140 - recovery + J) * 1000
        voltage[:] += dV * dt

        # We check for spikes and reset the voltage here rather than after,
        # which differs from the original implementation by Izhikevich.
        # However, calculating recovery for voltage values greater than
        # threshold can cause the system to blow up, which we want
        # to avoid at all costs.
        spiked[:] = (voltage >= 30) / dt
        voltage[spiked > 0] = self.reset_voltage

        dU = (self.tau_recovery * (self.coupling * voltage - recovery)) * 1000
        recovery[:] += dU * dt
        recovery[spiked > 0] = recovery[spiked > 0] + self.reset_recovery
Example #22
class Thalamus(Network):
    """Inhibits non-selected actions.

    The thalamus is intended to work in tandem with a `.BasalGanglia` module.
    It converts basal ganglia output into a signal with (approximately) 1 for
    the selected action and 0 elsewhere.

    In order to suppress low responses and strengthen high responses,
    a constant bias is added to each dimension (i.e., action), and dimensions
    mutually inhibit each other. Additionally, the ensemble representing
    each dimension is created with positive encoders and can be assigned
    positive x-intercepts to threshold low responses.

    Parameters
    ----------
    neurons_action : int, optional (Default: 50)
        Number of neurons per action to represent the selection.
    threshold_action : float, optional (Default: 0.2)
        Minimum value for action representation.
    mutual_inhibit : float, optional (Default: 1.0)
        Strength of inhibition between actions.
    route_inhibit : float, optional (Default: 3.0)
        Strength of inhibition for unchosen actions.
    synapse_inhibit : float, optional (Default: 0.008)
        Synaptic filter to apply for inhibition between actions.
    synapse_bg : float, optional (Default: 0.008)
        Synaptic filter for connection between basal ganglia and thalamus.
    neurons_channel_dim : int, optional (Default: 50)
        Number of neurons per routing channel dimension.
    synapse_channel : float, optional (Default: 0.01)
        Synaptic filter for channel inputs and outputs.
    neurons_gate : int, optional (Default: 40)
        Number of neurons per gate.
    threshold_gate : float, optional (Default: 0.3)
        Minimum value for gating neurons.
    synapse_to_gate : float, optional (Default: 0.002)
        Synaptic filter for controlling a gate.
    kwargs : dict
        Passed through to `nengo_spa.Network`.

    Attributes
    ----------
    actions : nengo.networks.EnsembleArray
        Each ensemble represents one dimension (action).
    bias : nengo.Node
        The constant bias injected in each *actions* ensemble.
    input : nengo.Node
        Input to the *actions* ensembles.
    output : nengo.Node
        Output from the *actions* ensembles.
    """

    neurons_action = IntParam('neurons_action', default=50)
    threshold_action = NumberParam('threshold_action', default=0.2)
    mutual_inhibit = NumberParam('mutual_inhibit', default=1.)
    route_inhibit = NumberParam('route_inhibit', default=3.)
    synapse_inhibit = SynapseParam('synapse_inhibit', default=Lowpass(0.008))
    synapse_bg = SynapseParam('synapse_bg', default=Lowpass(0.008))
    neurons_channel_dim = IntParam('neurons_channel_dim', default=50)
    synapse_channel = SynapseParam('synapse_channel', default=Lowpass(0.01))
    neurons_gate = IntParam('neurons_gate', default=40)
    threshold_gate = NumberParam('threshold_gate', default=0.3)
    synapse_to_gate = SynapseParam('synapse_to_gate', default=Lowpass(0.002))

    def __init__(self,
                 action_count,
                 neurons_action=Default,
                 threshold_action=Default,
                 mutual_inhibit=Default,
                 route_inhibit=Default,
                 synapse_inhibit=Default,
                 synapse_bg=Default,
                 neurons_channel_dim=Default,
                 synapse_channel=Default,
                 neurons_gate=Default,
                 threshold_gate=Default,
                 synapse_to_gate=Default,
                 **kwargs):
        kwargs.setdefault('label', "Thalamus")
        super(Thalamus, self).__init__(**kwargs)

        self.action_count = action_count
        self.neurons_action = neurons_action
        self.mutual_inhibit = mutual_inhibit
        self.route_inhibit = route_inhibit
        self.synapse_inhibit = synapse_inhibit
        self.threshold_action = threshold_action
        self.neurons_channel_dim = neurons_channel_dim
        self.synapse_channel = synapse_channel
        self.neurons_gate = neurons_gate
        self.threshold_gate = threshold_gate
        self.synapse_to_gate = synapse_to_gate
        self.synapse_bg = synapse_bg

        self.gates = {}  # gating ensembles per action (created as needed)
        self.channels = []  # channels to pass data between networks

        self.gate_in_connections = {}
        self.gate_out_connections = {}
        self.channel_out_connections = []
        self.fixed_connections = {}
        self.bg_connection = None

        with self:
            self.actions = nengo.networks.EnsembleArray(
                self.neurons_action,
                self.action_count,
                intercepts=nengo.dists.Uniform(self.threshold_action, 1),
                encoders=nengo.dists.Choice([[1.0]]),
                label="actions")
            nengo.Connection(self.actions.output,
                             self.actions.input,
                             transform=(np.eye(self.action_count) - 1) *
                             self.mutual_inhibit)
            self.bias = nengo.Node([1], label="thalamus bias")
            nengo.Connection(self.bias,
                             self.actions.input,
                             transform=np.ones((self.action_count, 1)))

        self.input = self.actions.input
        self.output = self.actions.output

    def construct_gate(self, index, bias, label=None):
        """Construct a gate ensemble.

        The gate neurons have no activity when the action is selected, but are
        active when the action is not selected. This makes the gate useful for
        inhibiting ensembles that should only be active when this action is
        active.

        Parameters
        ----------
        index : int
            Index to identify the gate.
        bias : :class:`nengo.Node`
            Node providing a bias input of 1.
        label : str, optional
            Label for the gate.

        Returns
        -------
        nengo.Ensemble
            The constructed gate.
        """
        if label is None:
            label = 'gate[%d]' % index
        intercepts = Uniform(self.threshold_gate, 1)
        self.gates[index] = gate = nengo.Ensemble(self.neurons_gate,
                                                  dimensions=1,
                                                  intercepts=intercepts,
                                                  label=label,
                                                  encoders=[[1]] *
                                                  self.neurons_gate)
        nengo.Connection(bias, gate, synapse=None)

        self.gate_in_connections[index] = nengo.Connection(
            self.actions.ensembles[index],
            self.gates[index],
            synapse=self.synapse_to_gate,
            transform=-1)

        return self.gates[index]

    def construct_channel(self, sink, type_, label=None):
        """Construct a channel.

        Channels are an additional neural population in-between a source
        population and a target population. This allows inhibiting the channel
        without affecting the source and thus is useful in routing information.

        Parameters
        ----------
        sink : nengo.base.NengoObject
            Sink/target that the channel feeds into.
        type_ : nengo_spa.types.Type
            Type of the data transmitted through the channel.
        label : str, optional
            Label for the channel.

        Returns
        -------
        :class:`nengo.networks.EnsembleArray`
            The constructed channel.
        """
        if label is None:
            label = 'channel'
        if type_ == TScalar:
            channel = dynamic.ScalarRealization()
        else:
            channel = dynamic.StateRealization(vocab=type_.vocab)

        self.channels.append(channel)
        self.channel_out_connections.append(
            nengo.Connection(channel.output,
                             sink,
                             synapse=self.synapse_channel))
        return channel

    def connect_bg(self, bg):
        """Connect a basal ganglia network to this thalamus."""
        self.bg_connection = nengo.Connection(bg.output,
                                              self.input,
                                              synapse=self.synapse_bg)

    def connect_gate(self, index, channel):
        """Connect a gate to a channel for information routing.

        Parameters
        ----------
        index : int
            Index of the gate to connect.
        channel : nengo.networks.EnsembleArray
            Channel to inhibit with the gate.
        """
        if isinstance(channel, Scalar):
            target = channel.scalar.neurons
        elif isinstance(channel, State):
            target = channel.state_ensembles.add_neuron_input()
        else:
            raise NotImplementedError()

        inhibit = [[-self.route_inhibit]] * target.size_in
        self.gate_out_connections[index] = nengo.Connection(
            self.gates[index],
            target,
            transform=inhibit,
            synapse=self.synapse_inhibit)

    def connect_fixed(self, index, target, transform):
        """Create connection to route fixed value.

        Parameters
        ----------
        index : int
            Index of the action to connect.
        target : nengo.base.NengoObject
            Target of the connection.
        transform : array-like
            Transform to apply to the connection.
        """
        self.fixed_connections[index] = self.connect(
            self.actions.ensembles[index], target, transform)

    def connect(self, source, target, transform):
        """Create connection.

        The connection will use the thalamus's *synapse_channel*.

        Parameters
        ----------
        source : nengo.base.NengoObject
            Source object.
        target : nengo.base.NengoObject
            Target object.
        transform : array-like
            Transform to apply to the connection.
        """
        return nengo.Connection(source,
                                target,
                                transform=transform,
                                synapse=self.synapse_channel)
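A hypothetical wiring sketch using only the methods defined above; the surrounding network is an assumption for illustration.

import nengo

with nengo.Network():
    thal = Thalamus(action_count=3)
    # gate for action 0; thal.bias is the Node providing a constant 1
    gate = thal.construct_gate(0, thal.bias)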
Example #23
class TripletSTDP(nengo.learning_rules.LearningRuleType):
    """Triplet spike-timing dependent plasticity rule.

    From "Triplets of Spikes in a Model of Spike Timing-Dependent Plasticity",
    Pfister & Gerstner, 2006.
    Here we implement the full model.
    """

    # Used by other Nengo objects
    modifies = "weights"
    probeable = ("pre_trace1", "pre_trace2", "post_trace1", "post_trace2")

    # Parameters
    pre_tau = NumberParam("pre_tau", low=0, low_open=True)
    pre_taux = NumberParam("pre_taux", low=0, low_open=True)
    pre_amp2 = NumberParam("pre_amp2", low=0, low_open=True)
    pre_amp3 = NumberParam("pre_amp3", low=0, low_open=True)
    post_tau = NumberParam("post_tau", low=0, low_open=True)
    post_tauy = NumberParam("post_tauy", low=0, low_open=True)
    post_amp2 = NumberParam("post_amp2", low=0, low_open=True)
    post_amp3 = NumberParam("post_amp3", low=0, low_open=True)
    nearest_spike = BoolParam("nearest_spike")

    def __init__(self,
                 param_set="default",
                 nearest_spike=False,
                 learning_rate=1e-9):
        """Uses parameter sets defined by Pfister & Gerstner, 2006."""
        self.pre_tau = 0.0168
        self.post_tau = 0.0337
        self.nearest_spike = nearest_spike
        if param_set == "default":
            self.pre_taux = 0.101
            self.post_tauy = 0.125
            self.pre_amp2 = 5e-10
            self.pre_amp3 = 6.2e-3
            self.post_amp2 = 7e-3
            self.post_amp3 = 2.3e-4
        elif param_set == "visual" and nearest_spike:
            self.pre_taux = 0.714
            self.post_tauy = 0.04
            self.pre_amp2 = 8.8e-11
            self.pre_amp3 = 5.3e-2
            self.post_amp2 = 6.6e-3
            self.post_amp3 = 3.1e-3
        elif param_set == "visual" and not nearest_spike:
            self.pre_taux = 0.101
            self.post_tauy = 0.125
            self.pre_amp2 = 5e-10
            self.pre_amp3 = 6.2e-3
            self.post_amp2 = 7e-3
            self.post_amp3 = 2.3e-4
        elif param_set == "hippocampal" and self.nearest_spike:
            self.pre_taux = 0.575
            self.post_tauy = 0.047
            self.pre_amp2 = 4.6e-3
            self.pre_amp3 = 9.1e-3
            self.post_amp2 = 3e-3
            self.post_amp3 = 7.5e-9
        elif param_set == "hippocampal" and not self.nearest_spike:
            self.pre_taux = 0.946
            self.post_tauy = 0.027
            self.pre_amp2 = 6.1e-3
            self.pre_amp3 = 6.7e-3
            self.post_amp2 = 1.6e-3
            self.post_amp3 = 1.4e-3
        else:
            raise ValueError("Only 'visual' and 'hippocampal' recognized.")

        super(TripletSTDP, self).__init__(learning_rate)
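A hedged sketch of how this rule would be attached, following the usual nengo learning-rule pattern; a backend build function for TripletSTDP would still be required to actually simulate it.

import numpy as np
import nengo

with nengo.Network():
    pre = nengo.Ensemble(10, 1)
    post = nengo.Ensemble(10, 1)
    conn = nengo.Connection(
        pre.neurons, post.neurons,
        transform=np.zeros((10, 10)),  # weights start at zero
        learning_rule_type=TripletSTDP(param_set="visual",
                                       nearest_spike=True))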
Example #24
class Probe(NengoObject):
    """A probe is an object that receives data from the simulation.

    This is to be used in any situation where you wish to gather simulation
    data (spike data, represented values, neuron voltages, etc.) for analysis.

    Probes cannot directly affect the simulation.

    TODO: Example usage for each object.

    Parameters
    ----------
    target : Ensemble, Node, Connection
        The Nengo object to connect to the probe.
    attr : str, optional
        The quantity to probe. Refer to the target's ``probeable`` list for
        details. Defaults to the first element in the list.
    sample_every : float, optional
        Sampling period in seconds.
    conn_args : dict, optional
        Optional keyword arguments to pass to the Connection created for this
        probe. For example, passing ``synapse=pstc`` will filter the data.
    """

    target = NengoObjectParam(nonzero_size_out=True)
    attr = StringParam(default=None)
    sample_every = NumberParam(default=None, optional=True, low=1e-10)
    conn_args = DictParam(default=None)
    seed = IntParam(default=None, optional=True)

    def __init__(self,
                 target,
                 attr=Default,
                 sample_every=Default,
                 **conn_args):
        if not hasattr(target, 'probeable') or len(target.probeable) == 0:
            raise TypeError("Type '%s' is not probeable" %
                            target.__class__.__name__)

        conn_args.setdefault('synapse', None)

        # We'll use the first in the list as default
        self.attr = attr if attr is not Default else target.probeable[0]

        if self.attr not in target.probeable:
            raise ValueError("'%s' is not probeable for '%s'" %
                             (self.attr, target))

        self.target = target
        self.sample_every = sample_every
        self.conn_args = conn_args
        self.seed = conn_args.get('seed', None)

    @property
    def label(self):
        return "Probe(%s.%s)" % (self.target.label, self.attr)

    @property
    def size_in(self):
        # TODO: A bit of a hack; make less hacky.
        if isinstance(self.target, Ensemble) and self.attr != "decoded_output":
            return self.target.neurons.size_out
        return self.target.size_out

    @property
    def size_out(self):
        return 0
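A minimal sketch of this Probe API (assuming it is exposed as nengo.Probe): extra keyword arguments such as synapse are forwarded through conn_args to the probe's Connection.

import nengo

with nengo.Network() as model:
    ens = nengo.Ensemble(50, 1)
    p = nengo.Probe(ens, "decoded_output", sample_every=0.01, synapse=0.005)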
Example #25
class Ensemble(NengoObject):
    """A group of neurons that collectively represent a vector.

    Parameters
    ----------
    n_neurons : int
        The number of neurons.
    dimensions : int
        The number of representational dimensions.
    radius : int, optional (Default: 1.0)
        The representational radius of the ensemble.
    encoders : Distribution or (n_neurons, dimensions) array_like, optional \
               (Default: UniformHypersphere(surface=True))
        The encoders used to transform from representational space
        to neuron space. Each row is a neuron's encoder; each column is a
        representational dimension.
    intercepts : Distribution or (n_neurons,) array_like, optional \
                 (Default: ``nengo.dists.Uniform(-1.0, 1.0)``)
        The point along each neuron's encoder where its activity is zero. If
        ``e`` is the neuron's encoder, then the activity will be zero when
        ``dot(x, e) <= c``, where ``c`` is the given intercept.
    max_rates : Distribution or (n_neurons,) array_like, optional \
                (Default: ``nengo.dists.Uniform(200, 400)``)
        The activity of each neuron when the input signal ``x`` is magnitude 1
        and aligned with that neuron's encoder ``e``;
        i.e., when ``dot(x, e) = 1``.
    eval_points : Distribution or (n_eval_points, dims) array_like, optional \
                  (Default: ``nengo.dists.UniformHypersphere()``)
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points.
    n_eval_points : int, optional (Default: None)
        The number of evaluation points to be drawn from the `eval_points`
        distribution. If None, then a heuristic is used to determine
        the number of evaluation points.
    neuron_type : `~nengo.neurons.NeuronType`, optional \
                  (Default: ``nengo.LIF()``)
        The model that simulates all neurons in the ensemble
        (see `~nengo.neurons.NeuronType`).
    gain : Distribution or (n_neurons,) array_like (Default: None)
        The gains associated with each neuron in the ensemble. If None, then
        the gain will be solved for using ``max_rates`` and ``intercepts``.
    bias : Distribution or (n_neurons,) array_like (Default: None)
        The biases associated with each neuron in the ensemble. If None, then
        the gain will be solved for using ``max_rates`` and ``intercepts``.
    noise : Process, optional (Default: None)
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    label : str, optional (Default: None)
        A name for the ensemble. Used for debugging and visualization.
    seed : int, optional (Default: None)
        The seed used for random number generation.

    Attributes
    ----------
    bias : Distribution or (n_neurons,) array_like or None
        The biases associated with each neuron in the ensemble.
    dimensions : int
        The number of representational dimensions.
    encoders : Distribution or (n_neurons, dimensions) array_like
        The encoders, used to transform from representational space
        to neuron space. Each row is a neuron's encoder, each column is a
        representational dimension.
    eval_points : Distribution or (n_eval_points, dims) array_like
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points.
    gain : Distribution or (n_neurons,) array_like or None
        The gains associated with each neuron in the ensemble.
    intercepts : Distribution or (n_neurons) array_like or None
        The point along each neuron's encoder where its activity is zero. If
        ``e`` is the neuron's encoder, then the activity will be zero when
        ``dot(x, e) <= c``, where ``c`` is the given intercept.
    label : str or None
        A name for the ensemble. Used for debugging and visualization.
    max_rates : Distribution or (n_neurons,) array_like or None
        The activity of each neuron when ``dot(x, e) = 1``,
        where ``e`` is the neuron's encoder.
    n_eval_points : int or None
        The number of evaluation points to be drawn from the `eval_points`
        distribution. If None, then a heuristic is used to determine
        the number of evaluation points.
    n_neurons : int or None
        The number of neurons.
    neuron_type : NeuronType
        The model that simulates all neurons in the ensemble
        (see ``nengo.neurons``).
    noise : Process or None
        Random noise injected directly into each neuron in the ensemble
        as current. A sample is drawn for each individual neuron on
        every simulation step.
    radius : int
        The representational radius of the ensemble.
    seed : int or None
        The seed used for random number generation.
    """

    probeable = ('decoded_output', 'input', 'scaled_encoders')

    n_neurons = IntParam('n_neurons', default=None, low=1)
    dimensions = IntParam('dimensions', default=None, low=1)
    radius = NumberParam('radius', default=1.0, low=1e-10)
    encoders = DistOrArrayParam('encoders',
                                default=UniformHypersphere(surface=True),
                                sample_shape=('n_neurons', 'dimensions'))
    intercepts = DistOrArrayParam('intercepts',
                                  default=Uniform(-1.0, 1.0),
                                  optional=True,
                                  sample_shape=('n_neurons', ))
    max_rates = DistOrArrayParam('max_rates',
                                 default=Uniform(200, 400),
                                 optional=True,
                                 sample_shape=('n_neurons', ))
    eval_points = DistOrArrayParam('eval_points',
                                   default=UniformHypersphere(),
                                   sample_shape=('*', 'dimensions'))
    n_eval_points = IntParam('n_eval_points', default=None, optional=True)
    neuron_type = NeuronTypeParam('neuron_type', default=LIF())
    gain = DistOrArrayParam('gain',
                            default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    bias = DistOrArrayParam('bias',
                            default=None,
                            optional=True,
                            sample_shape=('n_neurons', ))
    noise = ProcessParam('noise', default=None, optional=True)

    def __init__(self,
                 n_neurons,
                 dimensions,
                 radius=Default,
                 encoders=Default,
                 intercepts=Default,
                 max_rates=Default,
                 eval_points=Default,
                 n_eval_points=Default,
                 neuron_type=Default,
                 gain=Default,
                 bias=Default,
                 noise=Default,
                 label=Default,
                 seed=Default):
        super(Ensemble, self).__init__(label=label, seed=seed)
        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.neuron_type = neuron_type
        self.noise = noise

    def __getitem__(self, key):
        return ObjView(self, key)

    def __len__(self):
        return self.dimensions

    @property
    def neurons(self):
        """A direct interface to the neurons in the ensemble."""
        return Neurons(self)

    @neurons.setter
    def neurons(self, dummy):
        raise ReadonlyError(attr="neurons", obj=self)

    @property
    def size_in(self):
        """The dimensionality of the ensemble."""
        return self.dimensions

    @property
    def size_out(self):
        """The dimensionality of the ensemble."""
        return self.dimensions
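A minimal usage sketch: create an ensemble and connect a stimulus to one of its dimensions (the slice uses the __getitem__ defined above).

import nengo

with nengo.Network() as model:
    stim = nengo.Node(0.5)
    ens = nengo.Ensemble(n_neurons=100, dimensions=2, radius=1.5)
    nengo.Connection(stim, ens[0])  # ObjView slice of the first dimension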
Example #26
class BCM(LearningRuleType):
    """Bienenstock-Cooper-Munroe learning rule.

    Modifies connection weights as a function of the presynaptic activity
    and the difference between the postsynaptic activity and the average
    postsynaptic activity.

    Notes
    -----
    The BCM rule is dependent on pre and post neural activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the BCM rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the BCM rule by ``1 / post.n_neurons``.

    Parameters
    ----------
    theta_tau : float, optional (Default: 1.0)
        A scalar indicating the time constant for theta integration.
    pre_tau : float, optional (Default: 0.005)
        Filter constant on activities of neurons in pre population.
    post_tau : float, optional (Default: None)
        Filter constant on activities of neurons in post population.
        If None, post_tau will be the same as pre_tau.
    learning_rate : float, optional (Default: 1e-9)
        A scalar indicating the rate at which weights will be adjusted.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_tau : float
        Filter constant on activities of neurons in post population.
    pre_tau : float
        Filter constant on activities of neurons in pre population.
    theta_tau : float
        A scalar indicating the time constant for theta integration.
    """

    modifies = 'weights'
    probeable = ('theta', 'pre_filtered', 'post_filtered', 'delta')

    pre_tau = NumberParam('pre_tau', low=0, low_open=True)
    post_tau = NumberParam('post_tau', low=0, low_open=True)
    theta_tau = NumberParam('theta_tau', low=0, low_open=True)

    def __init__(self, pre_tau=0.005, post_tau=None, theta_tau=1.0,
                 learning_rate=1e-9):
        self.theta_tau = theta_tau
        self.pre_tau = pre_tau
        self.post_tau = post_tau if post_tau is not None else pre_tau
        super(BCM, self).__init__(learning_rate, size_in=0)

    @property
    def _argreprs(self):
        args = []
        if self.pre_tau != 0.005:
            args.append("pre_tau=%g" % self.pre_tau)
        if self.post_tau != self.pre_tau:
            args.append("post_tau=%g" % self.post_tau)
        if self.theta_tau != 1.0:
            args.append("theta_tau=%g" % self.theta_tau)
        if self.learning_rate != 1e-9:
            args.append("learning_rate=%g" % self.learning_rate)
        return args
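A hedged sketch of the scaling suggested in the Notes above, assuming the class is exposed as nengo.BCM: the learning rate is divided by post.n_neurons on a full-weight connection.

import nengo

with nengo.Network():
    pre = nengo.Ensemble(50, 1)
    post = nengo.Ensemble(100, 1)
    conn = nengo.Connection(
        pre, post,
        solver=nengo.solvers.LstsqL2(weights=True),
        learning_rule_type=nengo.BCM(learning_rate=1e-9 / post.n_neurons))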
Example #27
class State(Network):
    """Represents a single vector, with optional memory.

    This is a minimal SPA network, useful for passing data along (for example,
    visual input).

    Parameters
    ----------
    vocab : Vocabulary or int
        The vocabulary to use to interpret the vector. If an integer is given,
        the default vocabulary of that dimensionality will be used.
    subdimensions : int, optional (Default: 16)
        The dimension of the individual ensembles making up the vector.
        Must divide *dimensions* evenly. The number of sub-ensembles
        will be ``dimensions // subdimensions``.
    neurons_per_dimension : int, optional (Default: 50)
        Number of neurons per dimension. Each ensemble will have
        ``neurons_per_dimension * subdimensions`` neurons, for a total of
        ``neurons_per_dimension * dimensions`` neurons.
    feedback : float, optional (Default: 0.0)
        Gain of feedback connection. Set to 1.0 for perfect memory,
        or 0.0 for no memory. Values in between will create a decaying memory.
    represent_cc_identity : bool, optional
        Whether to use optimizations to better represent the circular
        convolution identity vector. If enabled, an `.IdentityEnsembleArray`
        is used internally; otherwise a plain `nengo.networks.EnsembleArray`,
        split evenly according to *subdimensions*, is used.
    feedback_synapse : float, optional (Default: 0.1)
        The synapse on the feedback connection.
    kwargs : dict
        Keyword arguments passed through to `nengo_spa.Network`.

    Attributes
    ----------
    input : nengo.Node
        Input.
    output : nengo.Node
        Output.
    """

    vocab = VocabularyOrDimParam('vocab', default=None, readonly=True)
    subdimensions = IntParam('subdimensions', default=16, low=1, readonly=True)
    neurons_per_dimension = IntParam(
        'neurons_per_dimension', default=50, low=1, readonly=True)
    feedback = NumberParam('feedback', default=.0, readonly=True)
    feedback_synapse = NumberParam(
        'feedback_synapse', default=.1, readonly=True)
    represent_cc_identity = BoolParam(
        'represent_cc_identity', default=True, readonly=True)

    def __init__(self, vocab=Default, subdimensions=Default,
                 neurons_per_dimension=Default, feedback=Default,
                 represent_cc_identity=Default,
                 feedback_synapse=Default, **kwargs):
        kwargs.setdefault('label', "State")
        super(State, self).__init__(**kwargs)

        self.vocab = vocab
        self.subdimensions = subdimensions
        self.neurons_per_dimension = neurons_per_dimension
        self.feedback = feedback
        self.feedback_synapse = feedback_synapse
        self.represent_cc_identity = represent_cc_identity

        dimensions = self.vocab.dimensions

        if dimensions % self.subdimensions != 0:
            raise ValidationError(
                "Dimensions (%d) must be divisible by subdimensions (%d)" % (
                    dimensions, self.subdimensions),
                attr='dimensions', obj=self)

        with self:
            if self.represent_cc_identity:
                self.state_ensembles = IdentityEnsembleArray(
                    self.neurons_per_dimension, dimensions, self.subdimensions,
                    label='state')
            else:
                self.state_ensembles = EnsembleArray(
                    self.neurons_per_dimension * self.subdimensions,
                    dimensions // self.subdimensions,
                    ens_dimensions=self.subdimensions,
                    eval_points=nengo.dists.CosineSimilarity(dimensions + 2),
                    intercepts=nengo.dists.CosineSimilarity(dimensions + 2),
                    label='state')

            if self.feedback is not None and self.feedback != 0.0:
                nengo.Connection(
                    self.state_ensembles.output, self.state_ensembles.input,
                    transform=self.feedback, synapse=self.feedback_synapse)

        self.input = self.state_ensembles.input
        self.output = self.state_ensembles.output
        self.declare_input(self.input, self.vocab)
        self.declare_output(self.output, self.vocab)
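A short usage sketch, assuming nengo_spa is installed: a 32-dimensional state with a decaying memory (feedback below 1.0).

import nengo_spa as spa

with spa.Network() as model:
    memory = spa.State(vocab=32, feedback=0.9)
    spa.sym.A >> memory  # route the semantic pointer 'A' into the state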
Example #28
class Oja(LearningRuleType):
    """Oja learning rule.

    Modifies connection weights according to the Hebbian Oja rule, which
    augments typically Hebbian coactivity with a "forgetting" term that is
    proportional to the weight of the connection and the square of the
    postsynaptic activity.

    Notes
    -----
    The Oja rule is dependent on pre and post neural activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the Oja rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the Oja rule by ``1 / post.n_neurons``.

    Parameters
    ----------
    pre_tau : float, optional (Default: 0.005)
        Filter constant on activities of neurons in pre population.
    post_tau : float, optional (Default: None)
        Filter constant on activities of neurons in post population.
        If None, post_tau will be the same as pre_tau.
    beta : float, optional (Default: 1.0)
        A scalar weight on the forgetting term.
    learning_rate : float, optional (Default: 1e-6)
        A scalar indicating the rate at which weights will be adjusted.

    Attributes
    ----------
    beta : float
        A scalar weight on the forgetting term.
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_tau : float
        Filter constant on activities of neurons in post population.
    pre_tau : float
        Filter constant on activities of neurons in pre population.
    """

    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')

    pre_tau = NumberParam('pre_tau', low=0, low_open=True)
    post_tau = NumberParam('post_tau', low=0, low_open=True)
    beta = NumberParam('beta', low=0)

    def __init__(self, pre_tau=0.005, post_tau=None, beta=1.0,
                 learning_rate=1e-6):
        self.pre_tau = pre_tau
        self.post_tau = post_tau if post_tau is not None else pre_tau
        self.beta = beta
        super(Oja, self).__init__(learning_rate, size_in=0)

    @property
    def _argreprs(self):
        args = []
        if self.pre_tau != 0.005:
            args.append("pre_tau=%g" % self.pre_tau)
        if self.post_tau != self.pre_tau:
            args.append("post_tau=%g" % self.post_tau)
        if self.beta != 1.0:
            args.append("beta=%g" % self.beta)
        if self.learning_rate != 1e-6:
            args.append("learning_rate=%g" % self.learning_rate)
        return args
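A hedged sketch of attaching this rule, assuming it is exposed as nengo.Oja: because it modifies weights, the connection must be between neurons with an explicit weight matrix.

import numpy as np
import nengo

with nengo.Network():
    pre = nengo.Ensemble(10, 1)
    post = nengo.Ensemble(10, 1)
    conn = nengo.Connection(
        pre.neurons, post.neurons,
        transform=np.zeros((10, 10)),  # initial weight matrix
        learning_rule_type=nengo.Oja(beta=1.0, learning_rate=1e-6))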
Example #29
class Probe(NengoObject):
    """A probe is an object that collects data from the simulation.

    This is to be used in any situation where you wish to gather simulation
    data (spike data, represented values, neuron voltages, etc.) for analysis.

    Probes do not directly affect the simulation.

    All Nengo objects can be probed (except Probes themselves).
    Each object has different attributes that can be probed.
    To see what is probeable for each object, print its
    ``probeable`` attribute.

    >>> with nengo.Network():
    ...     ens = nengo.Ensemble(10, 1)
    >>> print(ens.probeable)
    ['decoded_output', 'input']

    Parameters
    ----------
    target : Ensemble, Neurons, Node, or Connection
        The object to probe.

    attr : str, optional (Default: None)
        The signal to probe. Refer to the target's ``probeable`` list for
        details. If None, the first element in the ``probeable`` list
        will be used.
    sample_every : float, optional (Default: None)
        Sampling period in seconds. If None, the ``dt`` of the simulation
        will be used.
    synapse : Synapse, optional (Default: None)
        A synaptic model to filter the probed signal.
    solver : Solver, optional (Default: ``ConnectionDefault``)
        `~nengo.solvers.Solver` to compute decoders
        for probes that require them.
    label : str, optional (Default: None)
        A name for the probe. Used for debugging and visualization.
    seed : int, optional (Default: None)
        The seed used for random number generation.

    Attributes
    ----------
    attr : str or None
        The signal that will be probed. If None, the first element of the
        target's ``probeable`` list will be used.
    sample_every : float or None
        Sampling period in seconds. If None, the ``dt`` of the simulation
        will be used.
    solver : Solver or None
        `~nengo.solvers.Solver` to compute decoders. Only used for probes
        of an ensemble's decoded output.
    synapse : Synapse or None
        A synaptic model to filter the probed signal.
    target : Ensemble, Neurons, Node, or Connection
        The object to probe.
    """

    target = TargetParam('target', nonzero_size_out=True)
    attr = AttributeParam('attr', default=None)
    sample_every = NumberParam('sample_every',
                               default=None,
                               optional=True,
                               low=1e-10)
    synapse = SynapseParam('synapse', default=None)
    solver = ProbeSolverParam('solver', default=ConnectionDefault)

    _param_init_order = ['target']

    def __init__(self,
                 target,
                 attr=None,
                 sample_every=Default,
                 synapse=Default,
                 solver=Default,
                 label=Default,
                 seed=Default):
        super(Probe, self).__init__(label=label, seed=seed)
        self.target = target
        self.attr = attr if attr is not None else self.obj.probeable[0]
        self.sample_every = sample_every
        self.synapse = synapse
        self.solver = solver

    def __repr__(self):
        return "<Probe%s at 0x%x of '%s' of %s>" % (
            "" if self.label is None else ' "%s"' % self.label, id(self),
            self.attr, self.target)

    def __str__(self):
        return "<Probe%s of '%s' of %s>" % ("" if self.label is None else
                                            ' "%s"' % self.label, self.attr,
                                            self.target)

    @property
    def obj(self):
        """(Nengo object) The underlying Nengo object target."""
        return (self.target.obj
                if isinstance(self.target, ObjView) else self.target)

    @property
    def size_in(self):
        """(int) Dimensionality of the probed signal."""
        return self.target.size_out

    @property
    def size_out(self):
        """(int) Cannot connect from probes, so always 0."""
        return 0

    @property
    def slice(self):
        """(slice) The slice associated with the Nengo object target."""
        return (self.target.slice
                if isinstance(self.target, ObjView) else None)
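

# A minimal usage sketch (illustrative, not part of the class above):
# probe an ensemble's decoded output, filtering with a 10 ms lowpass
# synapse and sampling every millisecond. The names `model`, `ens`,
# `stim`, and `probe` are hypothetical.
import nengo

with nengo.Network() as model:
    stim = nengo.Node(0.5)
    ens = nengo.Ensemble(50, dimensions=1)
    nengo.Connection(stim, ens)
    probe = nengo.Probe(ens, synapse=0.01, sample_every=0.001)

with nengo.Simulator(model) as sim:
    sim.run(1.0)

data = sim.data[probe]  # the filtered, sampled decoded output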

class Oja(LearningRuleType):
    """Oja learning rule.

    Modifies connection weights according to the Hebbian Oja rule, which
    augments the typical Hebbian coactivity term with a "forgetting" term
    proportional to the weight of the connection and the square of the
    postsynaptic activity.

    Notes
    -----
    The Oja rule is dependent on pre and post neural activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the Oja rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the Oja rule by ``1 / post.n_neurons``, as shown in the usage
    sketch after this class.

    Parameters
    ----------
    learning_rate : float, optional
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional
        Synapse model used to filter the pre-synaptic activities.
    post_synapse : `.Synapse`, optional
        Synapse model used to filter the post-synaptic activities.
        If None, ``post_synapse`` will be the same as ``pre_synapse``.
    beta : float, optional
        A scalar weight on the forgetting term.

    Attributes
    ----------
    beta : float
        A scalar weight on the forgetting term.
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = "weights"
    probeable = ("pre_filtered", "post_filtered", "delta")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-6)
    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)
    beta = NumberParam("beta", low=0, readonly=True, default=1.0)

    pre_tau = _deprecated_tau("pre_tau", "pre_synapse")
    post_tau = _deprecated_tau("post_tau", "post_synapse")

    def __init__(
        self,
        learning_rate=Default,
        pre_synapse=Default,
        post_synapse=Default,
        beta=Default,
        pre_tau=Unconfigurable,
        post_tau=Unconfigurable,
    ):
        super().__init__(learning_rate, size_in=0)

        self.beta = beta

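        # 'pre_tau' and 'post_tau' are deprecated aliases; when given,
        # route them through the deprecation shims defined above,
        # otherwise set the synapse parameters directly.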
        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

        if post_tau is Unconfigurable:
            self.post_synapse = (self.pre_synapse
                                 if post_synapse is Default else post_synapse)
        else:
            self.post_tau = post_tau

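    # Constructor defaults, consulted to decide which arguments differ
    # from their defaults (e.g. when rendering the repr); note that
    # 'post_synapse' defaults to the current 'pre_synapse'.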
    @property
    def _argdefaults(self):
        return (
            ("learning_rate", Oja.learning_rate.default),
            ("pre_synapse", Oja.pre_synapse.default),
            ("post_synapse", self.pre_synapse),
            ("beta", Oja.beta.default),
        )
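
# A minimal usage sketch (illustrative, not part of the class above):
# apply the Oja rule to a neuron-to-neuron connection. The names `pre`,
# `post`, and `conn`, and the zero initial weights, are hypothetical.
import numpy as np
import nengo

with nengo.Network():
    pre = nengo.Ensemble(30, dimensions=1)
    post = nengo.Ensemble(30, dimensions=1)
    # Oja modifies full weight matrices, so connect neurons to neurons
    # with an explicit initial transform.
    conn = nengo.Connection(
        pre.neurons, post.neurons,
        transform=np.zeros((post.n_neurons, pre.n_neurons)),
        # Scale the learning rate by 1 / post.n_neurons, as suggested
        # in the Notes above.
        learning_rule_type=nengo.Oja(learning_rate=1e-6 / post.n_neurons))
    # The rule's filtered activities and weight updates are probeable.
    delta_probe = nengo.Probe(conn.learning_rule, 'delta')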