Example 1
class Gaussian(Distribution):
    """A Gaussian distribution.

    This represents a bell-curve centred at ``mean`` and with
    spread represented by the standard deviation, ``std``.

    Parameters
    ----------
    mean : Number
        The mean of the Gaussian.
    std : Number
        The standard deviation of the Gaussian.

    Raises
    ------
    ValidationError
        If ``std`` is <= 0.

    """
    mean = NumberParam('mean')
    std = NumberParam('std', low=0, low_open=True)

    def __init__(self, mean, std):
        super(Gaussian, self).__init__()
        self.mean = mean
        self.std = std

    def __repr__(self):
        return "Gaussian(mean=%r, std=%r)" % (self.mean, self.std)

    def sample(self, n, d=None, rng=np.random):
        shape = self._sample_shape(n, d)
        return rng.normal(loc=self.mean, scale=self.std, size=shape)
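A minimal usage sketch, assuming the inherited ``Distribution._sample_shape``
returns ``(n,)`` when ``d`` is None and ``(n, d)`` otherwise::

    import numpy as np

    g = Gaussian(mean=0.0, std=1.0)
    samples = g.sample(1000, d=2, rng=np.random.RandomState(0))
    assert samples.shape == (1000, 2)  # one 2-dimensional sample per row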
Example 2
class AdaptiveLIFRate(LIFRate):
    """Adaptive non-spiking version of the LIF agent model.

    Works as the LIF model, except with an adaptation state ``n``, which is
    subtracted from the input current. Its dynamics are::

        tau_n dn/dt = -n

    where ``n`` is incremented by ``inc_n`` when the agent spikes.

    Parameters
    ----------
    tau_n : float
        Adaptation time constant. Affects how quickly the adaptation state
        decays to zero in the absence of spikes (larger = slower decay).
    inc_n : float
        Adaptation increment. How much the adaptation state is increased after
        each spike.
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.

    References
    ----------
    .. [1] Koch, Christof. Biophysics of Computation: Information Processing
       in Single Agents. Oxford University Press, 1999. p. 339
    """

    probeable = ('rates', 'adaptation')

    tau_n = NumberParam('tau_n', low=0, low_open=True)
    inc_n = NumberParam('inc_n', low=0)

    def __init__(self, tau_n=1, inc_n=0.01, **lif_args):
        super(AdaptiveLIFRate, self).__init__(**lif_args)
        self.tau_n = tau_n
        self.inc_n = inc_n

    @property
    def _argreprs(self):
        args = super(AdaptiveLIFRate, self)._argreprs
        if self.tau_n != 1:
            args.append("tau_n=%s" % self.tau_n)
        if self.inc_n != 0.01:
            args.append("inc_n=%s" % self.inc_n)
        return args

    def step_math(self, dt, J, output, adaptation):
        """Implement the AdaptiveLIFRate nonlinearity."""
        n = adaptation
        LIFRate.step_math(self, dt, J - n, output)
        n += (dt / self.tau_n) * (self.inc_n * output - n)
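For reference, the last line of ``step_math`` reads as a forward-Euler step
of ``tau_n dn/dt = inc_n * r - n``, with ``r`` the rate held in ``output``;
the ``inc_n * output`` term plays the role of the per-spike increment in
this rate-based version.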
Example 3
class Exponential(Distribution):
    """An exponential distribution (optionally with high values clipped).

    If ``high`` is left to its default value of infinity, this is a standard
    exponential distribution. If ``high`` is set, then any sampled values at
    or above ``high`` will be clipped so they are slightly below ``high``.
    This is useful for thresholding and, by extension,
    `.networks.AssociativeMemory`.

    The probability distribution function (PDF) is given by::

               |  0                                 if x < shift
        p(x) = | 1/scale * exp(-(x - shift)/scale)  if x >= shift and x < high
               |  n                                 if x == high - eps
               |  0                                 if x >= high

    where ``n`` is such that the PDF integrates to one, and ``eps`` is an
    infinitesimally small number such that samples of ``x`` are strictly less
    than ``high`` (in practice, ``eps`` depends on floating point precision).

    Parameters
    ----------
    scale : float
        The scale parameter (inverse of the rate parameter lambda). Larger
        values make the distribution broader (heavier tail, larger mean).
    shift : float, optional (Default: 0)
        Amount to shift the distribution by. There will be no values smaller
        than this shift when sampling from the distribution.
    high : float, optional (Default: np.inf)
        All values larger than or equal to this value will be clipped to
        slightly less than this value.
    """

    scale = NumberParam('scale', low=0, low_open=True)
    shift = NumberParam('shift')
    high = NumberParam('high')

    def __init__(self, scale, shift=0., high=np.inf):
        super(Exponential, self).__init__()
        self.scale = scale
        self.shift = shift
        self.high = high

    def sample(self, n, d=None, rng=np.random):
        shape = self._sample_shape(n, d)
        x = rng.exponential(self.scale, shape) + self.shift
        high = np.nextafter(self.high, np.asarray(-np.inf, dtype=x.dtype))
        return np.clip(x, self.shift, high)
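A short sketch of the shifting and clipping behaviour (values chosen only
for illustration)::

    import numpy as np

    exp_dist = Exponential(scale=0.1, shift=0.2, high=0.5)
    x = exp_dist.sample(10000, rng=np.random.RandomState(0))
    assert x.min() >= 0.2  # nothing below the shift
    assert x.max() < 0.5   # clipped strictly below high via np.nextafter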
Example 4
class _LstsqL2Solver(Solver):
    """Base class for L2-regularized least-squares solvers."""

    reg = NumberParam('reg', low=0)
    solver = LeastSquaresSolverParam('solver')

    def __init__(self, weights=False, reg=0.1, solver=lstsq.Cholesky()):
        """
        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.
        reg : float, optional (Default: 0.1)
            Amount of regularization, as a fraction of the agent activity.
        solver : `.LeastSquaresSolver`, optional (Default: ``Cholesky()``)
            Subsolver to use for solving the least squares problem.

        Attributes
        ----------
        reg : float
            Amount of regularization, as a fraction of the agent activity.
        solver : `.LeastSquaresSolver`
            Subsolver to use for solving the least squares problem.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        super(_LstsqL2Solver, self).__init__(weights=weights)
        self.reg = reg
        self.solver = solver
Example 5
class _LstsqNoiseSolver(Solver):
    """Base class for least-squares solvers with noise."""

    noise = NumberParam('noise', low=0)
    solver = LeastSquaresSolverParam('solver')

    def __init__(self, weights=False, noise=0.1, solver=lstsq.Cholesky()):
        """
        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.
        noise : float, optional (Default: 0.1)
            Amount of noise, as a fraction of the agent activity.
        solver : `.LeastSquaresSolver`, optional (Default: ``Cholesky()``)
            Subsolver to use for solving the least squares problem.

        Attributes
        ----------
        noise : float
            Amount of noise, as a fraction of the agent activity.
        solver : `.LeastSquaresSolver`
            Subsolver to use for solving the least squares problem.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        super(_LstsqNoiseSolver, self).__init__(weights=weights)
        self.noise = noise
        self.solver = solver
Example 6
class PresentInput(Process):
    """Present a series of inputs, each for the same fixed length of time.

    Parameters
    ----------
    inputs : array_like
        Inputs to present, where each row is an input. Rows will be flattened.
    presentation_time : float
        Show each input for this amount of time (in seconds).
    """

    inputs = NdarrayParam('inputs', shape=('...',))
    presentation_time = NumberParam('presentation_time', low=0, low_open=True)

    def __init__(self, inputs, presentation_time, **kwargs):
        self.inputs = inputs
        self.presentation_time = presentation_time
        super(PresentInput, self).__init__(
            default_size_in=0, default_size_out=self.inputs[0].size, **kwargs)

    def make_step(self, shape_in, shape_out, dt, rng):
        assert shape_in == (0,)
        assert shape_out == (self.inputs[0].size,)

        n = len(self.inputs)
        inputs = self.inputs.reshape(n, -1)
        presentation_time = float(self.presentation_time)

        def step_presentinput(t):
            i = int((t - dt) / presentation_time + 1e-7)
            return inputs[i % n]

        return step_presentinput
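To make the schedule concrete, here is a sketch of which row the step
function returns at a few times, assuming the parameter machinery above
accepts plain arrays (illustrative values; ``dt = 0.001``)::

    import numpy as np

    p = PresentInput(inputs=np.eye(3), presentation_time=0.1)
    step = p.make_step((0,), (3,), dt=0.001, rng=np.random)
    step(0.001)  # t in (0.0, 0.1] -> inputs[0]
    step(0.101)  # t in (0.1, 0.2] -> inputs[1]
    step(0.301)  # i == 3 wraps modulo n -> inputs[0] again

The ``1e-7`` fudge factor guards against floating point error pushing
``(t - dt) / presentation_time`` just below an integer boundary.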
Example 7
class NnlsL2(Nnls):
    """Non-negative least-squares solver with L2 regularization.

    Similar to `.LstsqL2`, except the output values are non-negative.

    If solving for non-negative **weights**, it is important that the
    intercepts of the post-population are also non-negative, since agents with
    negative intercepts will never be silent, affecting output accuracy.
    """

    reg = NumberParam('reg', low=0)

    def __init__(self, weights=False, reg=0.1):
        """
        .. note:: Requires
                  `SciPy <https://docs.scipy.org/doc/scipy/reference/>`_.

        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.
        reg : float, optional (Default: 0.1)
            Amount of regularization, as a fraction of the agent activity.

        Attributes
        ----------
        reg : float
            Amount of regularization, as a fraction of the agent activity.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        super(NnlsL2, self).__init__(weights=weights)
        self.reg = reg

    def _solve(self, A, Y, rng, E, sigma=0.):
        import scipy.optimize

        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)
        Y = self.mul_encoders(Y, E, copy=True)
        d = Y.shape[1]

        # form Gram matrix so we can add regularization
        GA = np.dot(A.T, A)
        np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2)
        GY = np.dot(A.T, Y.clip(0, None))
        # ^ TODO: why is it better if we clip Y to be positive here?

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(GA, GY[:, i])

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'residuals': residuals, 'time': t}
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info

    def __call__(self, A, Y, rng=np.random, E=None):
        return self._solve(A, Y, rng, E, sigma=self.reg * A.max())
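The Gram-matrix manipulation in ``_solve`` is the normal-equations form of
ridge regression: minimizing ``||A x - y||**2 + m * sigma**2 * ||x||**2``
(with ``m = A.shape[0]``) leads to ``(A.T A + m * sigma**2 * I) x = A.T y``,
which is why ``m * sigma**2`` is added to the diagonal of ``GA``. Handing
the regularized normal equations to ``scipy.optimize.nnls`` keeps each
subproblem ``n x n``, independent of the number of evaluation points, while
preserving the non-negativity constraint.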
Example 8
class Triangle(Synapse):
    """Triangular finite impulse response (FIR) synapse.

    This synapse has a triangular and finite impulse response. The length of
    the triangle is ``t`` seconds; thus the digital filter will have
    ``t / dt + 1`` taps.

    Parameters
    ----------
    t : float
        Length of the triangle, in seconds.

    Attributes
    ----------
    t : float
        Length of the triangle, in seconds.
    """

    t = NumberParam('t', low=0)

    def __init__(self, t, **kwargs):
        super(Triangle, self).__init__(**kwargs)
        self.t = t

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.t)

    def make_step(self,
                  shape_in,
                  shape_out,
                  dt,
                  rng,
                  y0=None,
                  dtype=np.float64):
        """Returns a custom step function."""
        assert shape_in == shape_out

        n_taps = int(np.round(self.t / float(dt))) + 1
        num = np.arange(n_taps, 0, -1, dtype=np.float64)
        num /= num.sum()

        # Minimal multiply implementation finds the difference between
        # coefficients and subtracts a scaled signal at each time step.
        n0, ndiff = num[0].astype(dtype), num[-1].astype(dtype)
        x = collections.deque(maxlen=n_taps)

        output = np.zeros(shape_out, dtype=dtype)
        if y0 is not None:
            output[:] = y0

        def step_triangle(t, signal):
            output[...] += n0 * signal
            for xk in x:
                output[...] -= xk
            x.appendleft(ndiff * signal)
            return output

        return step_triangle
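The deque trick works because the triangular coefficients decrease linearly:
``num[k] - num[k + 1]`` equals the constant ``num[-1]``, stored as ``ndiff``.
Each input contributes ``n0 * signal`` on arrival and then loses
``ndiff * signal`` once per step while it sits in the deque, so its
effective coefficient walks down the triangle to zero; this replaces a full
``n_taps``-element dot product with one multiply and a few subtractions per
step.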
Example 9
class Alpha(LinearFilter):
    """Alpha-function filter synapse.

    The impulse-response function is given by::

        alpha(t) = (t / tau**2) * exp(-t / tau)

    and was found by [1]_ to be a good basic model for synapses.

    Parameters
    ----------
    tau : float
        The time constant of the filter in seconds.

    Attributes
    ----------
    tau : float
        The time constant of the filter in seconds.

    References
    ----------
    .. [1] Mainen, Z.F. and Sejnowski, T.J. (1995). Reliability of spike timing
       in neocortical neurons. Science (New York, NY), 268(5216):1503-6.
    """

    tau = NumberParam('tau', low=0)

    def __init__(self, tau, **kwargs):
        super(Alpha, self).__init__([1], [tau**2, 2 * tau, 1], **kwargs)
        self.tau = tau

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.tau)

    def make_step(self,
                  shape_in,
                  shape_out,
                  dt,
                  rng,
                  y0=None,
                  dtype=np.float64,
                  **kwargs):
        """Returns an optimized `.LinearFilter.Step` subclass."""
        # if tau < 0.03 * dt, exp(-dt / tau) < 1e-14, so just make it zero
        if self.tau <= .03 * dt:
            return self._make_zero_step(shape_in,
                                        shape_out,
                                        dt,
                                        rng,
                                        y0=y0,
                                        dtype=dtype)
        return super(Alpha, self).make_step(shape_in,
                                            shape_out,
                                            dt,
                                            rng,
                                            y0=y0,
                                            dtype=dtype,
                                            **kwargs)
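The constructor arguments encode the filter's transfer function: numerator
``[1]`` and denominator ``[tau**2, 2*tau, 1]`` give
``H(s) = 1 / (tau*s + 1)**2``, the Laplace transform of the ``alpha(t)``
impulse response above (a double pole at ``-1/tau``).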
Example 10
class LIF(LIFRate):
    """Spiking version of the leaky integrate-and-fire (LIF) agent model.

    Parameters
    ----------
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    min_voltage : float
        Minimum value for the membrane voltage. If ``-np.inf``, the voltage
        is never clipped.
    amplitude : float
        Scaling factor on the agent output. Corresponds to the relative
        amplitude of the output spikes of the agent.
    """

    probeable = ('spikes', 'voltage', 'refractory_time')

    min_voltage = NumberParam('min_voltage', high=0)

    def __init__(self, tau_rc=0.02, tau_ref=0.002, min_voltage=0, amplitude=1):
        super(LIF, self).__init__(
            tau_rc=tau_rc, tau_ref=tau_ref, amplitude=amplitude)
        self.min_voltage = min_voltage

    def step_math(self, dt, J, spiked, voltage, refractory_time):
        # reduce all refractory times by dt
        refractory_time -= dt

        # compute effective dt for each agent, based on remaining time.
        # note that refractory times that have completed midway into this
        # timestep will be given a partial timestep, and moreover these will
        # be subtracted to zero at the next timestep (or reset by a spike)
        delta_t = (dt - refractory_time).clip(0, dt)

        # update voltage using discretized lowpass filter
        # since v(t) = v(0) + (J - v(0))*(1 - exp(-t/tau)) assuming
        # J is constant over the interval [t, t + dt)
        voltage -= (J - voltage) * np.expm1(-delta_t / self.tau_rc)

        # determine which agents spiked (set them to 1/dt, else 0)
        spiked_mask = voltage > 1
        spiked[:] = spiked_mask * (self.amplitude / dt)

        # set v(0) = 1 and solve for t to compute the spike time
        t_spike = dt + self.tau_rc * np.log1p(
            -(voltage[spiked_mask] - 1) / (J[spiked_mask] - 1))

        # set spiked voltages to zero, refractory times to tau_ref, and
        # rectify negative voltages to a floor of min_voltage
        voltage[voltage < self.min_voltage] = self.min_voltage
        voltage[spiked_mask] = 0
        refractory_time[spiked_mask] = self.tau_ref + t_spike
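The ``t_spike`` line inverts the closed-form voltage update: within a step,
``v(s) = J + (v(0) - J) * exp(-s / tau_rc)``. Solving for the time between
the threshold crossing (``v = 1``) and the end-of-step voltage gives
``tau_rc * log1p(-(voltage - 1) / (J - 1))``, a negative quantity, so adding
``dt`` yields the spike time measured from the start of the step. Seeding
``refractory_time`` with ``tau_ref + t_spike`` therefore starts the
refractory period at the actual crossing rather than at the step boundary.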
Example 11
class PES(LearningRuleType):
    """Prescribed Error Sensitivity learning rule.

    Modifies a connection's decoders to minimize an error signal provided
    through a connection to the connection's learning rule.

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-4)
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional \
                  (Default: ``sirsim.synapses.Lowpass(tau=0.005)``)
        Synapse model used to filter the pre-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = 'decoders'
    probeable = ('error', 'correction', 'activities', 'delta')

    learning_rate = NumberParam('learning_rate',
                                low=0,
                                readonly=True,
                                default=1e-4)
    pre_synapse = SynapseParam('pre_synapse',
                               default=Lowpass(tau=0.005),
                               readonly=True)

    pre_tau = _deprecated_tau("pre_tau", "pre_synapse")

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 pre_tau=Unconfigurable):
        super(PES, self).__init__(learning_rate, size_in='post_state')
        if learning_rate is not Default and learning_rate >= 1.0:
            warnings.warn("This learning rate is very high, and can result "
                          "in floating point errors from too much current.")

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', PES.learning_rate.default),
                ('pre_synapse', PES.pre_synapse.default))
Example 12
class Voja(LearningRuleType):
    """Vector Oja learning rule.

    Modifies an ensemble's encoders to be selective to its inputs.

    A connection to the learning rule will provide a scalar weight for the
    learning rate, minus 1. For instance, 0 is normal learning, -1 is no
    learning, and less than -1 causes anti-learning or "forgetting".

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-2)
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`, optional \
                   (Default: ``sirsim.synapses.Lowpass(tau=0.005)``)
        Synapse model used to filter the post-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    """

    modifies = 'encoders'
    probeable = ('post_filtered', 'scaled_encoders', 'delta')

    learning_rate = NumberParam('learning_rate',
                                low=0,
                                readonly=True,
                                default=1e-2)
    post_synapse = SynapseParam('post_synapse',
                                default=Lowpass(tau=0.005),
                                readonly=True)

    post_tau = _deprecated_tau("post_tau", "post_synapse")

    def __init__(self,
                 learning_rate=Default,
                 post_synapse=Default,
                 post_tau=Unconfigurable):
        super(Voja, self).__init__(learning_rate, size_in=1)

        if post_tau is Unconfigurable:
            self.post_synapse = post_synapse
        else:
            self.post_tau = post_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', Voja.learning_rate.default),
                ('post_synapse', Voja.post_synapse.default))
Example 13
class UniformHypersphere(Distribution):
    """Uniform distribution on or in an n-dimensional unit hypersphere.

    Sample points are uniformly distributed across the volume (default) or
    surface of an n-dimensional unit hypersphere.

    Parameters
    ----------
    surface : bool, optional (Default: False)
        Whether sample points should be distributed uniformly
        over the surface of the hypersphere (True),
        or within the hypersphere (False).
    min_magnitude : Number, optional (Default: 0)
        Lower bound on the returned vector magnitudes (such that they are in
        the range ``[min_magnitude, 1]``). Must be in the range [0, 1).
        Ignored if ``surface`` is ``True``.
    """

    surface = BoolParam('surface')
    min_magnitude = NumberParam('min_magnitude', low=0, high=1, high_open=True)

    def __init__(self, surface=False, min_magnitude=0):
        super(UniformHypersphere, self).__init__()
        if surface and min_magnitude > 0:
            warnings.warn("min_magnitude ignored because surface is True")
        self.surface = surface
        self.min_magnitude = min_magnitude

    def __repr__(self):
        args = []
        if self.surface:
            args.append("surface=%s" % self.surface)
        if self.min_magnitude > 0:
            args.append("min_magnitude=%r" % self.min_magnitude)
        return "%s(%s)" % (type(self).__name__, ', '.join(args))

    def sample(self, n, d=None, rng=np.random):
        if d is None or d < 1:  # check this, since other dists allow d = None
            raise ValidationError("Dimensions must be a positive integer", 'd')

        samples = rng.randn(n, d)
        samples /= npext.norm(samples, axis=1, keepdims=True)

        if self.surface:
            return samples

        # Generate magnitudes for vectors from uniform distribution.
        # The (1 / d) exponent ensures that samples are uniformly distributed
        # in n-space and not all bunched up at the centre of the sphere.
        samples *= rng.uniform(low=self.min_magnitude**d, high=1,
                               size=(n, 1))**(1. / d)

        return samples
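The magnitude step relies on the inverse-CDF method: for points uniform in a
``d``-dimensional ball, the volume within radius ``r`` scales as ``r**d``,
so ``P(R <= r) = r**d`` and ``R = U**(1/d)`` for uniform ``U``. Restricting
``U`` to ``[min_magnitude**d, 1]`` keeps that density while excluding
magnitudes below ``min_magnitude``.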
Example 14
class Uniform(Distribution):
    """A uniform distribution.

    It's equally likely to get any scalar between ``low`` and ``high``.

    Note that the order of ``low`` and ``high`` doesn't matter;
    if ``low > high`` this will still work, and ``low`` will still
    be a closed interval while ``high`` is open.

    Parameters
    ----------
    low : Number
        The closed lower bound of the uniform distribution; samples >= low
    high : Number
        The open upper bound of the uniform distribution; samples < high
    integer : boolean, optional (Default: False)
        If true, sample from a uniform distribution of integers. In this case,
        low and high should be integers.
    """

    low = NumberParam('low')
    high = NumberParam('high')
    integer = BoolParam('integer')

    def __init__(self, low, high, integer=False):
        super(Uniform, self).__init__()
        self.low = low
        self.high = high
        self.integer = integer

    def __repr__(self):
        return "Uniform(low=%r, high=%r%s)" % (
            self.low, self.high, ", integer=True" if self.integer else "")

    def sample(self, n, d=None, rng=np.random):
        shape = self._sample_shape(n, d)
        if self.integer:
            return rng.randint(low=self.low, high=self.high, size=shape)
        else:
            return rng.uniform(low=self.low, high=self.high, size=shape)
Example 15
class Lowpass(LinearFilter):
    """Standard first-order lowpass filter synapse.

    The impulse-response function is given by::

        f(t) = (1 / tau) * exp(-t / tau)

    Parameters
    ----------
    tau : float
        The time constant of the filter in seconds.

    Attributes
    ----------
    tau : float
        The time constant of the filter in seconds.
    """
    tau = NumberParam('tau', low=0)

    def __init__(self, tau, **kwargs):
        super(Lowpass, self).__init__([1], [tau, 1], **kwargs)
        self.tau = tau

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.tau)

    def make_step(self,
                  shape_in,
                  shape_out,
                  dt,
                  rng,
                  y0=None,
                  dtype=np.float64,
                  **kwargs):
        """Returns an optimized `.LinearFilter.Step` subclass."""
        # if tau < 0.03 * dt, exp(-dt / tau) < 1e-14, so just make it zero
        if self.tau <= .03 * dt:
            return self._make_zero_step(shape_in,
                                        shape_out,
                                        dt,
                                        rng,
                                        y0=y0,
                                        dtype=dtype)
        return super(Lowpass, self).make_step(shape_in,
                                              shape_out,
                                              dt,
                                              rng,
                                              y0=y0,
                                              dtype=dtype,
                                              **kwargs)
Example 16
class Sigmoid(AgentType):
    """An agent model whose response curve is a sigmoid.

    Since the tuning curves are strictly positive, the ``intercepts``
    correspond to the inflection point of each sigmoid. That is,
    ``f(intercept) = 0.5`` where ``f`` is the pure sigmoid function.
    """

    probeable = ('rates',)

    tau_ref = NumberParam('tau_ref', low=0)

    def __init__(self, tau_ref=0.0025):
        super(Sigmoid, self).__init__()
        self.tau_ref = tau_ref

    @property
    def _argreprs(self):
        return [] if self.tau_ref == 0.0025 else ["tau_ref=%s" % self.tau_ref]

    def gain_bias(self, max_rates, intercepts):
        """Analytically determine gain, bias."""
        max_rates = np.array(max_rates, dtype=float, copy=False, ndmin=1)
        intercepts = np.array(intercepts, dtype=float, copy=False, ndmin=1)
        lim = 1. / self.tau_ref
        inverse = -np.log(lim / max_rates - 1.)
        gain = inverse / (1. - intercepts)
        bias = inverse - gain
        return gain, bias

    def max_rates_intercepts(self, gain, bias):
        """Compute the inverse of gain_bias."""
        inverse = gain + bias
        intercepts = 1 - inverse / gain
        lim = 1. / self.tau_ref
        max_rates = lim / (1 + np.exp(-inverse))
        return max_rates, intercepts

    def step_math(self, dt, J, output):
        """Implement the sigmoid nonlinearity."""
        output[...] = (1. / self.tau_ref) / (1.0 + np.exp(-J))
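``max_rates_intercepts`` exactly inverts ``gain_bias``, which allows a quick
self-check (illustrative values; rates must stay below
``1 / tau_ref = 400``)::

    import numpy as np

    sig = Sigmoid()
    gain, bias = sig.gain_bias(np.array([250.0]), np.array([0.2]))
    max_rates, intercepts = sig.max_rates_intercepts(gain, bias)
    # recovers max_rates of [250.0] and intercepts of [0.2],
    # up to floating point error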
Example 17
class Lstsq(Solver):
    """Unregularized least-squares solver.

    Parameters
    ----------
    weights : bool, optional (Default: False)
        If False, solve for decoders. If True, solve for weights.
    rcond : float, optional (Default: 0.01)
        Cut-off ratio for small singular values (see `numpy.linalg.lstsq`).

    Attributes
    ----------
    rcond : float
        Cut-off ratio for small singular values (see `numpy.linalg.lstsq`).
    weights : bool
        If False, solve for decoders. If True, solve for weights.
    """

    rcond = NumberParam('rcond', low=0)

    def __init__(self, weights=False, rcond=0.01):
        super(Lstsq, self).__init__(weights=weights)
        self.rcond = rcond

    def __call__(self, A, Y, rng=np.random, E=None):
        tstart = time.time()
        Y = self.mul_encoders(Y, E)
        X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond)
        t = time.time() - tstart
        return X, {
            'rmses': rmses(A, X, Y),
            'residuals': np.sqrt(residuals2),
            'rank': rank,
            'singular_values': s,
            'time': t
        }
Example 18
class Ensemble(SirsimObject):
    """A group of agents that collectively represent a vector.

    Parameters
    ----------
    n_agents : int
        The number of agents.
    dimensions : int
        The number of representational dimensions.

    radius : int, optional (Default: 1.0)
        The representational radius of the ensemble.
    encoders : Distribution or (n_agents, dimensions) array_like, optional \
               (Default: UniformHypersphere(surface=True))
        The encoders used to transform from representational space
        to agent space. Each row is an agent's encoder; each column is a
        representational dimension.
    intercepts : Distribution or (n_agents,) array_like, optional \
                 (Default: ``sirsim.dists.Uniform(-1.0, 1.0)``)
        The point along each agent's encoder where its activity is zero. If
        ``e`` is the agent's encoder, then the activity will be zero when
        ``dot(x, e) <= c``, where ``c`` is the given intercept.
    max_rates : Distribution or (n_agents,) array_like, optional \
                (Default: ``sirsim.dists.Uniform(200, 400)``)
        The activity of each agent when the input signal ``x`` is magnitude 1
        and aligned with that agent's encoder ``e``;
        i.e., when ``dot(x, e) = 1``.
    eval_points : Distribution or (n_eval_points, dims) array_like, optional \
                  (Default: ``sirsim.dists.UniformHypersphere()``)
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points.
    n_eval_points : int, optional (Default: None)
        The number of evaluation points to be drawn from the ``eval_points``
        distribution. If None, then a heuristic is used to determine
        the number of evaluation points.
    agent_type : `~sirsim.agents.AgentType`, optional \
                  (Default: ``sirsim.LIF()``)
        The model that simulates all agents in the ensemble
        (see `~sirsim.agents.AgentType`).
    gain : Distribution or (n_agents,) array_like (Default: None)
        The gains associated with each agent in the ensemble. If None, then
        the gain will be solved for using ``max_rates`` and ``intercepts``.
    bias : Distribution or (n_agents,) array_like (Default: None)
        The biases associated with each agent in the ensemble. If None, then
        the bias will be solved for using ``max_rates`` and ``intercepts``.
    noise : Process, optional (Default: None)
        Random noise injected directly into each agent in the ensemble
        as current. A sample is drawn for each individual agent on
        every simulation step.
    normalize_encoders : bool, optional (Default: True)
        Indicates whether the encoders should be normalized.
    label : str, optional (Default: None)
        A name for the ensemble. Used for debugging and visualization.
    seed : int, optional (Default: None)
        The seed used for random number generation.

    Attributes
    ----------
    bias : Distribution or (n_agents,) array_like or None
        The biases associated with each agent in the ensemble.
    dimensions : int
        The number of representational dimensions.
    encoders : Distribution or (n_agents, dimensions) array_like
        The encoders, used to transform from representational space
        to agent space. Each row is an agent's encoder, each column is a
        representational dimension.
    eval_points : Distribution or (n_eval_points, dims) array_like
        The evaluation points used for decoder solving, spanning the interval
        (-radius, radius) in each dimension, or a distribution from which
        to choose evaluation points.
    gain : Distribution or (n_agents,) array_like or None
        The gains associated with each agent in the ensemble.
    intercepts : Distribution or (n_agents,) array_like or None
        The point along each agent's encoder where its activity is zero. If
        ``e`` is the agent's encoder, then the activity will be zero when
        ``dot(x, e) <= c``, where ``c`` is the given intercept.
    label : str or None
        A name for the ensemble. Used for debugging and visualization.
    max_rates : Distribution or (n_agents,) array_like or None
        The activity of each agent when ``dot(x, e) = 1``,
        where ``e`` is the agent's encoder.
    n_eval_points : int or None
        The number of evaluation points to be drawn from the ``eval_points``
        distribution. If None, then a heuristic is used to determine
        the number of evaluation points.
    n_agents : int or None
        The number of agents.
    agent_type : AgentType
        The model that simulates all agents in the ensemble
        (see ``sirsim.agents``).
    noise : Process or None
        Random noise injected directly into each agent in the ensemble
        as current. A sample is drawn for each individual agent on
        every simulation step.
    radius : int
        The representational radius of the ensemble.
    seed : int or None
        The seed used for random number generation.
    """

    probeable = ('decoded_output', 'input', 'scaled_encoders')

    n_agents = IntParam('n_agents', low=1)
    dimensions = IntParam('dimensions', low=1)
    radius = NumberParam('radius', default=1.0, low=1e-10)
    encoders = DistOrArrayParam('encoders',
                                default=UniformHypersphere(surface=True),
                                sample_shape=('n_agents', 'dimensions'))
    intercepts = DistOrArrayParam('intercepts',
                                  default=Uniform(-1.0, 1.0),
                                  optional=True,
                                  sample_shape=('n_agents', ))
    max_rates = DistOrArrayParam('max_rates',
                                 default=Uniform(200, 400),
                                 optional=True,
                                 sample_shape=('n_agents', ))
    eval_points = DistOrArrayParam('eval_points',
                                   default=UniformHypersphere(),
                                   sample_shape=('*', 'dimensions'))
    n_eval_points = IntParam('n_eval_points', default=None, optional=True)
    agent_type = AgentTypeParam('agent_type', default=LIF())
    gain = DistOrArrayParam('gain',
                            default=None,
                            optional=True,
                            sample_shape=('n_agents', ))
    bias = DistOrArrayParam('bias',
                            default=None,
                            optional=True,
                            sample_shape=('n_agents', ))
    noise = ProcessParam('noise', default=None, optional=True)
    normalize_encoders = BoolParam('normalize_encoders',
                                   default=True,
                                   optional=True)

    def __init__(self,
                 n_agents,
                 dimensions,
                 radius=Default,
                 encoders=Default,
                 intercepts=Default,
                 max_rates=Default,
                 eval_points=Default,
                 n_eval_points=Default,
                 agent_type=Default,
                 gain=Default,
                 bias=Default,
                 noise=Default,
                 normalize_encoders=Default,
                 label=Default,
                 seed=Default):
        super(Ensemble, self).__init__(label=label, seed=seed)
        self.n_agents = n_agents
        self.dimensions = dimensions
        self.radius = radius
        self.encoders = encoders
        self.intercepts = intercepts
        self.max_rates = max_rates
        self.n_eval_points = n_eval_points
        self.eval_points = eval_points
        self.bias = bias
        self.gain = gain
        self.agent_type = agent_type
        self.noise = noise
        self.normalize_encoders = normalize_encoders

    def __getitem__(self, key):
        return ObjView(self, key)

    def __len__(self):
        return self.dimensions

    @property
    def agents(self):
        """A direct interface to the agents in the ensemble."""
        return Agents(self)

    @agents.setter
    def agents(self, dummy):
        raise ReadonlyError(attr="agents", obj=self)

    @property
    def size_in(self):
        """The dimensionality of the ensemble."""
        return self.dimensions

    @property
    def size_out(self):
        """The dimensionality of the ensemble."""
        return self.dimensions
Example 19
class LstsqL1(Solver):
    """Least-squares solver with L1 and L2 regularization (elastic net).

    This method is well suited for creating sparse decoders or weight matrices.
    """

    l1 = NumberParam('l1', low=0)
    l2 = NumberParam('l2', low=0)

    def __init__(self, weights=False, l1=1e-4, l2=1e-6, max_iter=1000):
        """
        .. note:: Requires `scikit-learn <http://scikit-learn.org/stable/>`_.

        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.
        l1 : float, optional (Default: 1e-4)
            Amount of L1 regularization.
        l2 : float, optional (Default: 1e-6)
            Amount of L2 regularization.
        max_iter : int, optional
            Maximum number of iterations for the underlying elastic net.

        Attributes
        ----------
        l1 : float
            Amount of L1 regularization.
        l2 : float
            Amount of L2 regularization.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        max_iter : int
            Maximum number of iterations for the underlying elastic net.
        """
        import sklearn.linear_model  # noqa F401, import to check existence
        assert sklearn.linear_model
        super(LstsqL1, self).__init__(weights=weights)
        self.l1 = l1
        self.l2 = l2
        self.max_iter = max_iter

    def __call__(self, A, Y, rng=np.random, E=None):
        import sklearn.linear_model
        tstart = time.time()
        Y = self.mul_encoders(Y, E, copy=True)  # copy since 'fit' may modify Y

        # TODO: play around with regularization constants (I just guessed).
        #   Do we need to scale regularization by number of agents, to get
        #   same level of sparsity? esp. with weights? Currently, setting
        #   l1=1e-3 works well with weights when connecting 1D populations
        #   with 100 agents each.
        a = self.l1 * A.max()  # L1 regularization
        b = self.l2 * A.max()**2  # L2 regularization
        alpha = a + b
        l1_ratio = a / (a + b)

        # --- solve least-squares A * X = Y
        model = sklearn.linear_model.ElasticNet(alpha=alpha,
                                                l1_ratio=l1_ratio,
                                                fit_intercept=False,
                                                max_iter=self.max_iter)
        model.fit(A, Y)
        X = model.coef_.T
        X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1], )
        t = time.time() - tstart
        infos = {'rmses': rmses(A, X, Y), 'time': t}
        return X, infos
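The two constants map onto scikit-learn's parameterization: ``ElasticNet``
minimizes roughly ``(1 / (2 * n_samples)) * ||Y - A X||**2
+ alpha * l1_ratio * ||X||_1 + 0.5 * alpha * (1 - l1_ratio) * ||X||**2``,
so with ``alpha = a + b`` and ``l1_ratio = a / (a + b)`` the effective L1
weight is ``a`` and the effective L2 weight is ``b`` (up to scikit-learn's
factor of one half), each scaled by the activity magnitude ``A.max()`` so
that ``l1`` and ``l2`` act as relative regularization fractions.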
Example 20
class LstsqDrop(Solver):
    """Find sparser decoders/weights by dropping small values.

    This solver first solves for coefficients (decoders/weights) with
    L2 regularization, drops those nearest to zero, and retrains the rest.
    """

    drop = NumberParam('drop', low=0, high=1)
    solver1 = SolverParam('solver1')
    solver2 = SolverParam('solver2')

    def __init__(self,
                 weights=False,
                 drop=0.25,
                 solver1=LstsqL2(reg=0.001),
                 solver2=LstsqL2(reg=0.1)):
        """
        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.
        drop : float, optional (Default: 0.25)
            Fraction of decoders or weights to set to zero.
        solver1 : Solver, optional (Default: ``LstsqL2(reg=0.001)``)
            Solver for finding the initial decoders.
        solver2 : Solver, optional (Default: ``LstsqL2(reg=0.1)``)
            Used for re-solving for the decoders after dropout.

        Attributes
        ----------
        drop : float
            Fraction of decoders or weights to set to zero.
        solver1 : Solver
            Solver for finding the initial decoders.
        solver2 : Solver
            Used for re-solving for the decoders after dropout.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        super(LstsqDrop, self).__init__(weights=weights)
        self.drop = drop
        self.solver1 = solver1
        self.solver2 = solver2

    def __call__(self, A, Y, rng=np.random, E=None):
        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)

        # solve for coefficients using standard solver
        X, info0 = self.solver1(A, Y, rng=rng)
        X = self.mul_encoders(X, E)

        # drop weights close to zero, based on `drop` ratio
        Xabs = np.sort(np.abs(X.flat))
        threshold = Xabs[int(np.round(self.drop * Xabs.size))]
        X[np.abs(X) < threshold] = 0

        # retrain nonzero weights
        Y = self.mul_encoders(Y, E)
        for i in range(X.shape[1]):
            nonzero = X[:, i] != 0
            if nonzero.sum() > 0:
                X[nonzero, i], info1 = self.solver2(A[:, nonzero],
                                                    Y[:, i],
                                                    rng=rng)

        t = time.time() - tstart
        info = {
            'rmses': rmses(A, X, Y),
            'info0': info0,
            'info1': info1,
            'time': t
        }
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
Example 21
class Process(FrozenObject):
    """A general system with input, output, and state.

    For more details on how to use processes and make
    custom process subclasses, see :doc:`examples/advanced/processes`.

    Parameters
    ----------
    default_size_in : int (Default: 0)
        Sets the default size in for nodes using this process.
    default_size_out : int (Default: 1)
        Sets the default size out for nodes running this process. Also,
        if ``d`` is not specified in `~.Process.run` or `~.Process.run_steps`,
        this will be used.
    default_dt : float (Default: 0.001 (1 millisecond))
        If ``dt`` is not specified in `~.Process.run`, `~.Process.run_steps`,
        `~.Process.ntrange`, or `~.Process.trange`, this will be used.
    seed : int, optional (Default: None)
        Random number seed. Ensures random factors will be the same each run.

    Attributes
    ----------
    default_dt : float
        If ``dt`` is not specified in `~.Process.run`, `~.Process.run_steps`,
        `~.Process.ntrange`, or `~.Process.trange`, this will be used.
    default_size_in : int
        The default size in for nodes using this process.
    default_size_out : int
        The default size out for nodes running this process. Also, if ``d`` is
        not specified in `~.Process.run` or `~.Process.run_steps`,
        this will be used.
    seed : int or None
        Random number seed. Ensures random factors will be the same each run.
    """

    default_size_in = IntParam('default_size_in', low=0)
    default_size_out = IntParam('default_size_out', low=0)
    default_dt = NumberParam('default_dt', low=0, low_open=True)
    seed = IntParam('seed', low=0, high=maxint, optional=True)

    def __init__(self,
                 default_size_in=0,
                 default_size_out=1,
                 default_dt=0.001,
                 seed=None):
        super(Process, self).__init__()
        self.default_size_in = default_size_in
        self.default_size_out = default_size_out
        self.default_dt = default_dt
        self.seed = seed

    def apply(self, x, d=None, dt=None, rng=np.random, copy=True, **kwargs):
        """Run process on a given input.

        Keyword arguments that do not appear in the parameter list below
        will be passed to the ``make_step`` function of this process.

        Parameters
        ----------
        x : ndarray
            The input signal given to the process.
        d : int, optional (Default: None)
            Output dimensionality. If None, ``default_size_out`` will be used.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        rng : `numpy.random.RandomState` (Default: ``numpy.random``)
            Random number generator used for stochastic processes.
        copy : bool, optional (Default: True)
            If True, a new output array will be created for output.
            If False, the input signal ``x`` will be overwritten.
        """
        shape_in = as_shape(np.asarray(x[0]).shape, min_dim=1)
        shape_out = as_shape(self.default_size_out if d is None else d)
        dt = self.default_dt if dt is None else dt
        rng = self.get_rng(rng)
        step = self.make_step(shape_in, shape_out, dt, rng, **kwargs)
        output = np.zeros((len(x), ) + shape_out) if copy else x
        for i, xi in enumerate(x):
            output[i] = step((i + 1) * dt, xi)
        return output

    def get_rng(self, rng):
        """Get a properly seeded independent RNG for the process step.

        Parameters
        ----------
        rng : `numpy.random.RandomState`
            The parent random number generator to use if the seed is not set.
        """
        seed = rng.randint(maxint) if self.seed is None else self.seed
        return np.random.RandomState(seed)

    def make_step(self, shape_in, shape_out, dt, rng):
        """Create function that advances the process forward one time step.

        This must be implemented by all custom processes.

        Parameters
        ----------
        shape_in : tuple
            The shape of the input signal.
        shape_out : tuple
            The shape of the output signal.
        dt : float
            The simulation timestep.
        rng : `numpy.random.RandomState`
            A random number generator.
        """
        raise NotImplementedError("Process must implement `make_step` method.")

    def run(self, t, d=None, dt=None, rng=np.random, **kwargs):
        """Run process without input for given length of time.

        Keyword arguments that do not appear in the parameter list below
        will be passed to the ``make_step`` function of this process.

        Parameters
        ----------
        t : float
            The length of time to run.
        d : int, optional (Default: None)
            Output dimensionality. If None, ``default_size_out`` will be used.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        rng : `numpy.random.RandomState` (Default: ``numpy.random``)
            Random number generator used for stochastic processes.
        """
        dt = self.default_dt if dt is None else dt
        n_steps = int(np.round(float(t) / dt))
        return self.run_steps(n_steps, d=d, dt=dt, rng=rng, **kwargs)

    def run_steps(self, n_steps, d=None, dt=None, rng=np.random, **kwargs):
        """Run process without input for given number of steps.

        Keyword arguments that do not appear in the parameter list below
        will be passed to the ``make_step`` function of this process.

        Parameters
        ----------
        n_steps : int
            The number of steps to run.
        d : int, optional (Default: None)
            Output dimensionality. If None, ``default_size_out`` will be used.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        rng : `numpy.random.RandomState` (Default: ``numpy.random``)
            Random number generator used for stochastic processes.
        """
        shape_in = as_shape(0)
        shape_out = as_shape(self.default_size_out if d is None else d)
        dt = self.default_dt if dt is None else dt
        rng = self.get_rng(rng)
        step = self.make_step(shape_in, shape_out, dt, rng, **kwargs)
        output = np.zeros((n_steps, ) + shape_out)
        for i in range(n_steps):
            output[i] = step((i + 1) * dt)
        return output

    def ntrange(self, n_steps, dt=None):
        """Create time points corresponding to a given number of steps.

        Parameters
        ----------
        n_steps : int
            The given number of steps.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        """
        dt = self.default_dt if dt is None else dt
        return dt * np.arange(1, n_steps + 1)

    def trange(self, t, dt=None):
        """Create time points corresponding to a given length of time.

        Parameters
        ----------
        t : float
            The given length of time.
        dt : float, optional (Default: None)
            Simulation timestep. If None, ``default_dt`` will be used.
        """
        dt = self.default_dt if dt is None else dt
        n_steps = int(np.round(float(t) / dt))
        return self.ntrange(n_steps, dt=dt)
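Since ``make_step`` is the only method a subclass must provide, a custom
process can be small. A hedged sketch (``ConstantOffset`` is invented here
for illustration, and assumes ``NumberParam`` accepts plain floats as in the
examples above)::

    import numpy as np

    class ConstantOffset(Process):
        """Output the input signal plus a fixed offset (illustrative)."""

        offset = NumberParam('offset')

        def __init__(self, offset, **kwargs):
            self.offset = offset
            super(ConstantOffset, self).__init__(
                default_size_in=1, default_size_out=1, **kwargs)

        def make_step(self, shape_in, shape_out, dt, rng):
            offset = self.offset

            def step_constantoffset(t, x):
                return x + offset

            return step_constantoffset

    p = ConstantOffset(0.5)
    y = p.apply(np.zeros((5, 1)))  # every output row equals 0.5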
Example 22
class LearningRuleType(FrozenObject, SupportDefaultsMixin):
    """Base class for all learning rule objects.

    To use a learning rule, pass it as a ``learning_rule_type`` keyword
    argument to the `~sirsim.Connection` on which you want to do learning.

    Each learning rule exposes two important pieces of metadata that the
    builder uses to determine what information should be stored.

    The ``size_in`` is the dimensionality of the incoming error signal. It
    can be either an integer or one of the following string values:

    * ``'pre'``: vector error signal in pre-object space
    * ``'post'``: vector error signal in post-object space
    * ``'mid'``: vector error signal in the ``conn.size_mid`` space
    * ``'pre_state'``: vector error signal in pre-synaptic ensemble space
    * ``'post_state'``: vector error signal in post-synaptic ensemble space

    The difference between ``'post_state'`` and ``'post'`` is that with the
    former, if an ``Agents`` object is passed, it will use the dimensionality
    of the corresponding ``Ensemble``, whereas the latter simply uses the
    ``post`` object ``size_in``. Similarly with ``'pre_state'`` and ``'pre'``.

    The ``modifies`` attribute denotes the signal targeted by the rule.
    Options are:

    * ``'encoders'``
    * ``'decoders'``
    * ``'weights'``

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-6)
        A scalar indicating the rate at which ``modifies`` will be adjusted.
    size_in : int, str, optional (Default: 0)
        Dimensionality of the error signal (see above).

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which ``modifies`` will be adjusted.
    size_in : int, str
        Dimensionality of the error signal.
    modifies : str
        The signal targeted by the learning rule.
    """

    modifies = None
    probeable = ()

    learning_rate = NumberParam('learning_rate',
                                low=0,
                                readonly=True,
                                default=1e-6)
    size_in = LearningRuleTypeSizeInParam('size_in', low=0)

    def __init__(self, learning_rate=Default, size_in=0):
        super(LearningRuleType, self).__init__()
        self.learning_rate = learning_rate
        self.size_in = size_in

    def __repr__(self):
        r = []
        for name, default in self._argdefaults:
            value = getattr(self, name)
            if value != default:
                r.append("%s=%r" % (name, value))
        return '%s(%s)' % (type(self).__name__, ", ".join(r))

    @property
    def _argdefaults(self):
        return ('learning_rate', LearningRuleType.learning_rate.default),
Example 23
class Oja(LearningRuleType):
    """Oja learning rule.

    Modifies connection weights according to the Hebbian Oja rule, which
    augments typical Hebbian coactivity with a "forgetting" term that is
    proportional to the weight of the connection and the square of the
    postsynaptic activity.

    Notes
    -----
    The Oja rule is dependent on pre and post agent activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the Oja rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the Oja rule by ``1 / post.n_agents``.

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-6)
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional \
                  (Default: ``sirsim.synapses.Lowpass(tau=0.005)``)
        Synapse model used to filter the pre-synaptic activities.
    post_synapse : `.Synapse`, optional (Default: ``None``)
        Synapse model used to filter the post-synaptic activities.
        If None, ``post_synapse`` will be the same as ``pre_synapse``.
    beta : float, optional (Default: 1.0)
        A scalar weight on the forgetting term.

    Attributes
    ----------
    beta : float
        A scalar weight on the forgetting term.
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')

    learning_rate = NumberParam('learning_rate',
                                low=0,
                                readonly=True,
                                default=1e-6)
    pre_synapse = SynapseParam('pre_synapse',
                               default=Lowpass(tau=0.005),
                               readonly=True)
    post_synapse = SynapseParam('post_synapse', default=None, readonly=True)
    beta = NumberParam('beta', low=0, readonly=True, default=1.0)

    pre_tau = _deprecated_tau("pre_tau", "pre_synapse")
    post_tau = _deprecated_tau("post_tau", "post_synapse")

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 beta=Default,
                 pre_tau=Unconfigurable,
                 post_tau=Unconfigurable):
        super(Oja, self).__init__(learning_rate, size_in=0)

        self.beta = beta

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

        if post_tau is Unconfigurable:
            self.post_synapse = (self.pre_synapse
                                 if post_synapse is Default else post_synapse)
        else:
            self.post_tau = post_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', Oja.learning_rate.default),
                ('pre_synapse', Oja.pre_synapse.default),
                ('post_synapse', self.pre_synapse), ('beta', Oja.beta.default))
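Written out, the description above corresponds to a per-step weight change
of roughly ``delta = kappa * (pre * post - beta * weights * post**2)``,
where the second term is the "forgetting" term proportional to the
connection weight and the square of the postsynaptic activity. The update
code is not shown here, so treat this as a reading of the prose rather than
the exact implementation.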
Example 24
class BCM(LearningRuleType):
    """Bienenstock-Cooper-Munro learning rule.

    Modifies connection weights as a function of the presynaptic activity
    and the difference between the postsynaptic activity and the average
    postsynaptic activity.

    Notes
    -----
    The BCM rule is dependent on pre and post agent activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the BCM rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the BCM rule by ``1 / post.n_agents``.

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-9)
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional \
                  (Default: ``sirsim.synapses.Lowpass(tau=0.005)``)
        Synapse model used to filter the pre-synaptic activities.
    post_synapse : `.Synapse`, optional (Default: ``None``)
        Synapse model used to filter the post-synaptic activities.
        If None, ``post_synapse`` will be the same as ``pre_synapse``.
    theta_synapse : `.Synapse`, optional \
                    (Default: ``sirsim.synapses.Lowpass(tau=1.0)``)
        Synapse model used to filter the theta signal.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    theta_synapse : `.Synapse`
        Synapse model used to filter the theta signal.
    """

    modifies = 'weights'
    probeable = ('theta', 'pre_filtered', 'post_filtered', 'delta')

    learning_rate = NumberParam('learning_rate',
                                low=0,
                                readonly=True,
                                default=1e-9)
    pre_synapse = SynapseParam('pre_synapse',
                               default=Lowpass(tau=0.005),
                               readonly=True)
    post_synapse = SynapseParam('post_synapse', default=None, readonly=True)
    theta_synapse = SynapseParam('theta_synapse',
                                 default=Lowpass(tau=1.0),
                                 readonly=True)

    pre_tau = _deprecated_tau("pre_tau", "pre_synapse")
    post_tau = _deprecated_tau("post_tau", "post_synapse")
    theta_tau = _deprecated_tau("theta_tau", "theta_synapse")

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 theta_synapse=Default,
                 pre_tau=Unconfigurable,
                 post_tau=Unconfigurable,
                 theta_tau=Unconfigurable):
        super(BCM, self).__init__(learning_rate, size_in=0)

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

        if post_tau is Unconfigurable:
            self.post_synapse = (self.pre_synapse
                                 if post_synapse is Default else post_synapse)
        else:
            self.post_tau = post_tau

        if theta_tau is Unconfigurable:
            self.theta_synapse = theta_synapse
        else:
            self.theta_tau = theta_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', BCM.learning_rate.default),
                ('pre_synapse', BCM.pre_synapse.default),
                ('post_synapse', self.pre_synapse),
                ('theta_synapse', BCM.theta_synapse.default))
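
A similar hedged sketch for BCM, additionally probing the rule's ``theta``
signal (that ``conn.learning_rule`` is a probeable object is an assumption
carried over from the Nengo-style API):

import sirsim

with sirsim.Network() as net:
    pre = sirsim.Ensemble(n_agents=50, dimensions=1)
    post = sirsim.Ensemble(n_agents=100, dimensions=1)
    conn = sirsim.Connection(
        pre, post,
        solver=sirsim.solvers.LstsqL2(weights=True),
        learning_rule_type=BCM(learning_rate=1e-9 / post.n_agents))
    # 'theta' is in BCM.probeable: the running average of
    # post-synaptic activity that sets the modification threshold.
    theta_probe = sirsim.Probe(conn.learning_rule, 'theta')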
Example no. 25
class LIFRate(AgentType):
    """Non-spiking version of the leaky integrate-and-fire (LIF) agent model.

    Parameters
    ----------
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    amplitude : float
        Scaling factor on the agent output. Corresponds to the relative
        amplitude of the output spikes of the agent.
    """

    probeable = ('rates',)

    tau_rc = NumberParam('tau_rc', low=0, low_open=True)
    tau_ref = NumberParam('tau_ref', low=0)
    amplitude = NumberParam('amplitude', low=0, low_open=True)

    def __init__(self, tau_rc=0.02, tau_ref=0.002, amplitude=1):
        super(LIFRate, self).__init__()
        self.tau_rc = tau_rc
        self.tau_ref = tau_ref
        self.amplitude = amplitude

    @property
    def _argreprs(self):
        args = []
        if self.tau_rc != 0.02:
            args.append("tau_rc=%s" % self.tau_rc)
        if self.tau_ref != 0.002:
            args.append("tau_ref=%s" % self.tau_ref)
        if self.amplitude != 1:
            args.append("amplitude=%s" % self.amplitude)
        return args

    def gain_bias(self, max_rates, intercepts):
        """Analytically determine gain, bias."""
        max_rates = np.array(max_rates, dtype=float, copy=False, ndmin=1)
        intercepts = np.array(intercepts, dtype=float, copy=False, ndmin=1)

        inv_tau_ref = 1. / self.tau_ref if self.tau_ref > 0 else np.inf
        if np.any(max_rates > inv_tau_ref):
            raise ValidationError("Max rates must be below the inverse "
                                  "refractory period (%0.3f)" % inv_tau_ref,
                                  attr='max_rates', obj=self)

        x = 1.0 / (1 - np.exp(
            (self.tau_ref - (1.0 / max_rates)) / self.tau_rc))
        gain = (1 - x) / (intercepts - 1.0)
        bias = 1 - gain * intercepts
        return gain, bias

    def max_rates_intercepts(self, gain, bias):
        """Compute the inverse of gain_bias."""
        intercepts = (1 - bias) / gain
        max_rates = 1.0 / (self.tau_ref - self.tau_rc * np.log1p(
            1.0 / (gain * (intercepts - 1) - 1)))
        if not np.all(np.isfinite(max_rates)):
            warnings.warn("Non-finite values detected in `max_rates`; this "
                          "probably means that `gain` was too small.")
        return max_rates, intercepts

    def rates(self, x, gain, bias):
        """Always use LIFRate to determine rates."""
        J = self.current(x, gain, bias)
        out = np.zeros_like(J)
        # Use LIFRate's step_math explicitly to ensure rate approximation
        LIFRate.step_math(self, dt=1, J=J, output=out)
        return out

    def step_math(self, dt, J, output):
        """Implement the LIFRate nonlinearity."""
        j = J - 1
        output[:] = 0  # faster than output[j <= 0] = 0
        output[j > 0] = self.amplitude / (
            self.tau_ref + self.tau_rc * np.log1p(1. / j[j > 0]))
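
Because ``max_rates_intercepts`` is the inverse of ``gain_bias``, a round
trip through the two methods should recover the requested tuning. This check
needs only ``numpy`` and the class as defined above:

import numpy as np

lif = LIFRate(tau_rc=0.02, tau_ref=0.002)

# Ask for agents that fire at 100 and 200 Hz at x = 1, with firing
# thresholds (intercepts) at -0.5 and 0.3. Both rates are below the
# inverse refractory period (500 Hz), as gain_bias requires.
gain, bias = lif.gain_bias(max_rates=np.array([100., 200.]),
                           intercepts=np.array([-0.5, 0.3]))

# Inverting recovers the original tuning up to floating-point error:
# max_rates ~= [100., 200.], intercepts ~= [-0.5, 0.3]
max_rates, intercepts = lif.max_rates_intercepts(gain, bias)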
Example no. 26
class Probe(SirsimObject):
    """A probe is an object that collects data from the simulation.

    This is to be used in any situation where you wish to gather simulation
    data (spike data, represented values, agent voltages, etc.) for analysis.

    Probes do not directly affect the simulation.

    All Sirsim objects can be probed (except Probes themselves).
    Each object has different attributes that can be probed.
    To see what is probeable for each object, print its
    ``probeable`` attribute.

    >>> with sirsim.Network():
    ...     ens = sirsim.Ensemble(10, 1)
    >>> print(ens.probeable)
    ['decoded_output', 'input']

    Parameters
    ----------
    target : Ensemble, Agents, Node, or Connection
        The object to probe.
    attr : str, optional (Default: None)
        The signal to probe. Refer to the target's ``probeable`` list for
        details. If None, the first element in the ``probeable`` list
        will be used.
    sample_every : float, optional (Default: None)
        Sampling period in seconds. If None, the ``dt`` of the simulation
        will be used.
    synapse : Synapse, optional (Default: None)
        A synaptic model to filter the probed signal.
    solver : Solver, optional (Default: ``ConnectionDefault``)
        `~sirsim.solvers.Solver` to compute decoders
        for probes that require them.
    label : str, optional (Default: None)
        A name for the probe. Used for debugging and visualization.
    seed : int, optional (Default: None)
        The seed used for random number generation.

    Attributes
    ----------
    attr : str or None
        The signal that will be probed. If None, the first element of the
        target's ``probeable`` list will be used.
    sample_every : float or None
        Sampling period in seconds. If None, the ``dt`` of the simulation
        will be used.
    solver : Solver or None
        `~sirsim.solvers.Solver` to compute decoders. Only used for probes
        of an ensemble's decoded output.
    synapse : Synapse or None
        A synaptic model to filter the probed signal.
    target : Ensemble, Agents, Node, or Connection
        The object to probe.
    """

    target = TargetParam('target', nonzero_size_out=True)
    attr = AttributeParam('attr', default=None, optional=True)
    sample_every = NumberParam('sample_every',
                               default=None,
                               optional=True,
                               low=1e-10)
    synapse = SynapseParam('synapse', default=None, optional=True)
    solver = ProbeSolverParam('solver', default=ConnectionDefault)

    _param_init_order = ['target']

    def __init__(self,
                 target,
                 attr=None,
                 sample_every=Default,
                 synapse=Default,
                 solver=Default,
                 label=Default,
                 seed=Default):
        super(Probe, self).__init__(label=label, seed=seed)
        self.target = target
        self.attr = attr if attr is not None else self.obj.probeable[0]
        self.sample_every = sample_every
        self.synapse = synapse
        self.solver = solver

    def __repr__(self):
        return "<Probe%s at 0x%x of '%s' of %s>" % (
            "" if self.label is None else ' "%s"' % self.label, id(self),
            self.attr, self.target)

    def __str__(self):
        return "<Probe%s of '%s' of %s>" % ("" if self.label is None else
                                            ' "%s"' % self.label, self.attr,
                                            self.target)

    @property
    def obj(self):
        """(Sirsim object) The underlying Sirsim object target."""
        return (self.target.obj
                if isinstance(self.target, ObjView) else self.target)

    @property
    def size_in(self):
        """(int) Dimensionality of the probed signal."""
        return self.target.size_out

    @property
    def size_out(self):
        """(int) Cannot connect from probes, so always 0."""
        return 0

    @property
    def slice(self):
        """(slice) The slice associated with the Sirsim object target."""
        return (self.target.slice
                if isinstance(self.target, ObjView) else None)
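
An end-to-end sketch of creating and reading a probe. Note that
``sirsim.Simulator`` and the ``sim.data`` mapping are assumptions modelled on
the Nengo-style API rather than classes shown in these examples:

import sirsim

with sirsim.Network() as net:
    ens = sirsim.Ensemble(10, 1)
    # Low-pass filter the probed signal with a 10 ms synapse.
    p = sirsim.Probe(ens, 'decoded_output',
                     synapse=sirsim.synapses.Lowpass(tau=0.01))

with sirsim.Simulator(net) as sim:  # Simulator API assumed
    sim.run(1.0)

print(sim.data[p].shape)  # (timesteps, 1): one column per dimension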
Example no. 27
class Izhikevich(AgentType):
    """Izhikevich agent model.

    This implementation is based on the original paper [1]_;
    however, we rename some variables for clarity.
    What was originally 'v' we term 'voltage', which represents the membrane
    potential of each agent. What was originally 'u' we term 'recovery',
    which represents membrane recovery, "which accounts for the activation
    of K+ ionic currents and inactivation of Na+ ionic currents."
    The 'a', 'b', 'c', and 'd' parameters are also renamed
    (see the parameters below).

    We use default values that correspond to regular spiking ('RS') agents.
    For other classes of agents, set the parameters as follows.

    * Intrinsically bursting (IB): ``reset_voltage=-55, reset_recovery=4``
    * Chattering (CH): ``reset_voltage=-50, reset_recovery=2``
    * Fast spiking (FS): ``tau_recovery=0.1``
    * Low-threshold spiking (LTS): ``coupling=0.25``
    * Resonator (RZ): ``tau_recovery=0.1, coupling=0.26``

    Parameters
    ----------
    tau_recovery : float, optional (Default: 0.02)
        (Originally 'a') Time scale of the recovery variable.
    coupling : float, optional (Default: 0.2)
        (Originally 'b') How sensitive recovery is to subthreshold
        fluctuations of voltage.
    reset_voltage : float, optional (Default: -65.)
        (Originally 'c') The voltage to reset to after a spike, in millivolts.
    reset_recovery : float, optional (Default: 8.)
        (Originally 'd') The recovery value to reset to after a spike.

    References
    ----------
    .. [1] E. M. Izhikevich, "Simple model of spiking agents."
       IEEE Transactions on Neural Networks, vol. 14, no. 6,
       pp. 1569-1572, 2003.
       (http://www.izhikevich.org/publications/spikes.pdf)
    """

    probeable = ('spikes', 'voltage', 'recovery')

    tau_recovery = NumberParam('tau_recovery', low=0, low_open=True)
    coupling = NumberParam('coupling', low=0)
    reset_voltage = NumberParam('reset_voltage')
    reset_recovery = NumberParam('reset_recovery')

    def __init__(self, tau_recovery=0.02, coupling=0.2,
                 reset_voltage=-65., reset_recovery=8.):
        super(Izhikevich, self).__init__()
        self.tau_recovery = tau_recovery
        self.coupling = coupling
        self.reset_voltage = reset_voltage
        self.reset_recovery = reset_recovery

    @property
    def _argreprs(self):
        args = []

        def add(attr, default):
            if getattr(self, attr) != default:
                args.append("%s=%s" % (attr, getattr(self, attr)))
        add("tau_recovery", 0.02)
        add("coupling", 0.2)
        add("reset_voltage", -65.)
        add("reset_recovery", 8.)
        return args

    def rates(self, x, gain, bias):
        """Estimates steady-state firing rate given gain and bias."""
        J = self.current(x, gain, bias)
        voltage = np.zeros_like(J)
        recovery = np.zeros_like(J)
        return settled_firingrate(self.step_math, J, [voltage, recovery],
                                  settle_time=0.001, sim_time=1.0)

    def step_math(self, dt, J, spiked, voltage, recovery):
        """Implement the Izhikevich nonlinearity."""
        # Numerical instability occurs for very low inputs.
        # We'll clip them to be greater than some value that was chosen by
        # looking at the simulations for many parameter sets.
        # A more principled minimum value would be better.
        J = np.maximum(-30., J)

        dV = (0.04 * voltage ** 2 + 5 * voltage + 140 - recovery + J) * 1000
        voltage[:] += dV * dt

        # We check for spikes and reset the voltage here rather than after,
        # which differs from the original implementation by Izhikevich.
        # However, calculating recovery for voltage values greater than
        # threshold can cause the system to blow up, which we want
        # to avoid at all costs.
        spiked[:] = (voltage >= 30) / dt
        voltage[spiked > 0] = self.reset_voltage

        dU = (self.tau_recovery * (self.coupling * voltage - recovery)) * 1000
        recovery[:] += dU * dt
        recovery[spiked > 0] = recovery[spiked > 0] + self.reset_recovery
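
The presets listed in the docstring map directly onto constructor arguments.
A short sketch (attaching the model to an ensemble via an ``agent_type``
keyword is an assumption, not an API shown in these examples):

# Regular spiking (RS) agents are the defaults.
rs = Izhikevich()
# Fast spiking (FS)
fs = Izhikevich(tau_recovery=0.1)
# Chattering (CH)
ch = Izhikevich(reset_voltage=-50, reset_recovery=2)
# Resonator (RZ)
rz = Izhikevich(tau_recovery=0.1, coupling=0.26)

# e.g. sirsim.Ensemble(100, 1, agent_type=fs)  # keyword name assumed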
Example no. 28
class WhiteSignal(Process):
    """An ideal low-pass filtered white noise process.

    This signal is created in the frequency domain, and designed to have
    exactly equal power at all frequencies below the cut-off frequency,
    and no power above the cut-off.

    The signal is naturally periodic, so it can be used beyond its period
    while still being continuous with continuous derivatives.

    Parameters
    ----------
    period : float
        A white noise signal with this period will be generated.
        Samples will repeat after this duration.
    high : float
        The cut-off frequency of the low-pass filter, in Hz.
        Must not exceed the Nyquist frequency for the simulation
        timestep, which is ``0.5 / dt``.
    rms : float, optional (Default: 0.5)
        The root mean square power of the filtered signal.
    y0 : float, optional (Default: None)
        Align the phase of each output dimension to begin at the value
        that is closest (in absolute value) to y0.
    seed : int, optional (Default: None)
        Random number seed. Ensures noise will be the same each run.
    """

    period = NumberParam('period', low=0, low_open=True)
    high = NumberParam('high', low=0, low_open=True)
    rms = NumberParam('rms', low=0, low_open=True)
    y0 = NumberParam('y0', optional=True)

    def __init__(self, period, high, rms=0.5, y0=None, **kwargs):
        super(WhiteSignal, self).__init__(default_size_in=0, **kwargs)
        self.period = period
        self.high = high
        self.rms = rms
        self.y0 = y0

        if self.high is not None and self.high < 1. / self.period:
            raise ValidationError(
                "Make ``high >= 1. / period`` to produce a non-zero signal",
                attr='high', obj=self)

    def __repr__(self):
        return "%s(period=%r, high=%r, rms=%r)" % (
            type(self).__name__, self.period, self.high, self.rms)

    def make_step(self, shape_in, shape_out, dt, rng):
        assert shape_in == (0,)

        nyquist_cutoff = 0.5 / dt
        if self.high > nyquist_cutoff:
            raise ValidationError("High must not exceed the Nyquist frequency "
                                  "for the given dt (%0.3f)" % nyquist_cutoff,
                                  attr='high', obj=self)

        n_coefficients = int(np.ceil(self.period / dt / 2.))
        shape = (n_coefficients + 1,) + shape_out
        sigma = self.rms * np.sqrt(0.5)
        coefficients = 1j * rng.normal(0., sigma, size=shape)
        coefficients += rng.normal(0., sigma, size=shape)
        coefficients[0] = 0.
        coefficients[-1].imag = 0.

        set_to_zero = npext.rfftfreq(2 * n_coefficients, d=dt) > self.high
        coefficients[set_to_zero] = 0.
        power_correction = np.sqrt(
            1. - np.sum(set_to_zero, dtype=float) / n_coefficients)
        if power_correction > 0.:
            coefficients /= power_correction
        coefficients *= np.sqrt(2 * n_coefficients)
        signal = np.fft.irfft(coefficients, axis=0)

        if self.y0 is not None:
            # Starts each dimension off where it is closest to y0
            def shift(x):
                offset = np.argmin(abs(self.y0 - x))
                return np.roll(x, -offset+1)  # +1 since t starts at dt
            signal = np.apply_along_axis(shift, 0, signal)

        def step_whitesignal(t):
            i = int(round(t / dt))
            return signal[i % signal.shape[0]]

        return step_whitesignal
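
A hedged usage sketch: driving a network input with the process (the
``sirsim.Node`` signature is an assumption). With the default ``dt = 0.001``
the Nyquist frequency is 500 Hz, so a 10 Hz cut-off is safely below it:

import sirsim

with sirsim.Network() as net:
    # Two dimensions of noise that repeat every 1 s, band-limited
    # to 10 Hz with an RMS amplitude of 0.5.
    noise = sirsim.Node(WhiteSignal(period=1.0, high=10, rms=0.5),
                        size_out=2)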