Example #1
class UniformHypersphere(Distribution):
    """Uniform distribution on or in an n-dimensional unit hypersphere.

    Sample points are uniformly distributed across the volume (default) or
    surface of an n-dimensional unit hypersphere.

    Parameters
    ----------
    surface : bool, optional (Default: False)
        Whether sample points should be distributed uniformly
        over the surface of the hypersphere (True),
        or within the hypersphere (False).
    min_magnitude : Number, optional (Default: 0)
        Lower bound on the returned vector magnitudes (such that they are in
        the range ``[min_magnitude, 1]``). Must be in the range [0, 1).
        Ignored if ``surface`` is ``True``.
    """

    surface = BoolParam('surface')
    min_magnitude = NumberParam('min_magnitude', low=0, high=1, high_open=True)

    def __init__(self, surface=False, min_magnitude=0):
        super(UniformHypersphere, self).__init__()
        if surface and min_magnitude > 0:
            warnings.warn("min_magnitude ignored because surface is True")
        self.surface = surface
        self.min_magnitude = min_magnitude

    def __repr__(self):
        args = []
        if self.surface:
            args.append("surface=%s" % self.surface)
        if self.min_magnitude > 0:
            args.append("min_magnitude=%r" % self.min_magnitude)
        return "%s(%s)" % (type(self).__name__, ', '.join(args))

    def sample(self, n, d, rng=np.random):
        if d is None or d < 1:  # check this, since other dists allow d = None
            raise ValidationError("Dimensions must be a positive integer", 'd')

        samples = rng.randn(n, d)
        samples /= npext.norm(samples, axis=1, keepdims=True)

        if self.surface:
            return samples

        # Generate magnitudes for vectors from uniform distribution.
        # The (1 / d) exponent ensures that samples are uniformly distributed
        # in n-space and not all bunched up at the centre of the sphere.
        samples *= rng.uniform(low=self.min_magnitude**d, high=1,
                               size=(n, 1))**(1. / d)

        return samples
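
A quick numerical check of the (1 / d) exponent reasoning in the comment above (a sketch, assuming the class is available as nengo.dists.UniformHypersphere): for a uniform ball in d dimensions the radius density is d * r**(d - 1), so the mean radius should be d / (d + 1).

import numpy as np
from nengo.dists import UniformHypersphere

rng = np.random.RandomState(0)
d = 3
points = UniformHypersphere().sample(10000, d, rng=rng)
radii = np.linalg.norm(points, axis=1)
print(radii.mean())  # ~0.75 = d / (d + 1) for d = 3

# surface=True pins every sample onto the unit sphere instead
surface = UniformHypersphere(surface=True).sample(100, d, rng=rng)
assert np.allclose(np.linalg.norm(surface, axis=1), 1.0)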
Example #2
File: dists.py Project: tuchang/nengo
class Uniform(Distribution):
    """A uniform distribution.

    It's equally likely to get any scalar between ``low`` and ``high``.

    Note that the order of ``low`` and ``high`` doesn't matter;
    if ``low > high`` this will still work, and ``low`` will still
    be a closed interval while ``high`` is open.

    Parameters
    ----------
    low : Number
        The closed lower bound of the uniform distribution; samples >= low
    high : Number
        The open upper bound of the uniform distribution; samples < high
    integer : boolean, optional (Default: False)
        If true, sample from a uniform distribution of integers. In this case,
        low and high should be integers.
    """

    low = NumberParam('low')
    high = NumberParam('high')
    integer = BoolParam('integer')

    def __init__(self, low, high, integer=False):
        super(Uniform, self).__init__()
        self.low = low
        self.high = high
        self.integer = integer

    def __repr__(self):
        return "Uniform(low=%r, high=%r%s)" % (
            self.low, self.high, ", integer=True" if self.integer else "")

    def sample(self, n, d=None, rng=np.random):
        shape = self._sample_shape(n, d)
        if self.integer:
            return rng.randint(low=self.low, high=self.high, size=shape)
        else:
            return rng.uniform(low=self.low, high=self.high, size=shape)
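
A minimal usage sketch (assuming the class is exposed as nengo.dists.Uniform). The integer case inherits randint's half-open interval, which matches the docstring's open upper bound.

import numpy as np
from nengo.dists import Uniform

rng = np.random.RandomState(0)
floats = Uniform(-1, 1).sample(5, rng=rng)               # scalars in [-1, 1)
ints = Uniform(0, 10, integer=True).sample(5, rng=rng)   # integers in [0, 10)
matrix = Uniform(0, 1).sample(4, d=2, rng=rng)           # shape (4, 2)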
Example #3
class STDP(nengo.learning_rules.LearningRuleType):
    """Spike-timing dependent plasticity rule."""

    # Used by other Nengo objects
    modifies = "weights"
    probeable = ("pre_trace", "post_trace", "pre_scale", "post_scale")

    # Parameters
    pre_tau = NumberParam("pre_tau", low=0, low_open=True)
    pre_amp = NumberParam("pre_amp", low=0, low_open=True)
    post_tau = NumberParam("post_tau", low=0, low_open=True)
    post_amp = NumberParam("post_amp", low=0, low_open=True)
    bounds = StringParam("bounds")
    max_weight = NumberParam("max_weight")
    min_weight = NumberParam("min_weight")

    def __init__(
        self,
        pre_tau=0.0168,
        post_tau=0.0337,
        pre_amp=1.0,
        post_amp=1.0,
        bounds="hard",
        max_weight=0.3,
        min_weight=-0.3,
        learning_rate=1e-9,
    ):
        self.pre_tau = pre_tau
        self.post_tau = post_tau
        self.pre_amp = pre_amp
        self.post_amp = post_amp
        self.bounds = bounds
        self.max_weight = max_weight
        self.min_weight = min_weight
        super(STDP, self).__init__(learning_rate)
Example #4
class mPES(LearningRuleType):
    modifies = "weights"
    probeable = ("error", "activities", "delta", "pos_memristors",
                 "neg_memristors")

    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    r_max = NumberParam("r_max", readonly=True, default=2.3e8)
    r_min = NumberParam("r_min", readonly=True, default=200)
    exponent = NumberParam("exponent", readonly=True, default=-0.146)
    gain = NumberParam("gain", readonly=True, default=1e3)

    def __init__(self,
                 pre_synapse=Default,
                 r_max=Default,
                 r_min=Default,
                 exponent=Default,
                 noisy=False,
                 gain=Default,
                 seed=None):
        super().__init__(size_in="post_state")

        self.pre_synapse = pre_synapse
        self.r_max = r_max
        self.r_min = r_min
        self.exponent = exponent
        self.noise_percentage = 0 if not noisy else noisy
        self.gain = gain
        self.seed = seed

    @property
    def _argdefaults(self):
        return (
            ("learning_rate", mPES.learning_rate.default),
            ("pre_synapse", mPES.pre_synapse.default),
            ("r_max", mPES.r_max.default),
            ("r_min", mPES.r_min.default),
            ("exponent", mPES.exponent.default),
        )
Example #5
class LIF(LIFRate):
    """Spiking version of the leaky integrate-and-fire (LIF) neuron model.

    Parameters
    ----------
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    min_voltage : float
        Minimum value for the membrane voltage. If ``-np.inf``, the voltage
        is never clipped.
    """

    probeable = ('spikes', 'voltage', 'refractory_time')

    min_voltage = NumberParam('min_voltage', high=0)

    def __init__(self, tau_rc=0.02, tau_ref=0.002, min_voltage=0):
        super(LIF, self).__init__(tau_rc=tau_rc, tau_ref=tau_ref)
        self.min_voltage = min_voltage

    def step_math(self, dt, J, spiked, voltage, refractory_time):
        # reduce all refractory times by dt
        refractory_time -= dt

        # compute effective dt for each neuron, based on remaining time.
        # note that refractory times that have completed midway into this
        # timestep will be given a partial timestep, and moreover these will
        # be subtracted to zero at the next timestep (or reset by a spike)
        delta_t = (dt - refractory_time).clip(0, dt)

        # update voltage using discretized lowpass filter
        # since v(t) = v(0) + (J - v(0))*(1 - exp(-t/tau)) assuming
        # J is constant over the interval [t, t + dt)
        voltage -= (J - voltage) * np.expm1(-delta_t / self.tau_rc)

        # determine which neurons spiked (set them to 1/dt, else 0)
        spiked_mask = voltage > 1
        spiked[:] = spiked_mask / dt

        # set v(0) = 1 and solve for t to compute the spike time
        t_spike = dt + self.tau_rc * np.log1p(-(voltage[spiked_mask] - 1) /
                                              (J[spiked_mask] - 1))

        # set spiked voltages to zero, refractory times to tau_ref, and
        # rectify negative voltages to a floor of min_voltage
        voltage[voltage < self.min_voltage] = self.min_voltage
        voltage[spiked_mask] = 0
        refractory_time[spiked_mask] = self.tau_ref + t_spike
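
A small driver for step_math (a sketch, assuming numpy and the LIF class above are in scope): one simulated second of constant suprathreshold current should reproduce the closed-form LIF rate 1 / (tau_ref + tau_rc * log1p(1 / (J - 1))).

import numpy as np

lif = LIF(tau_rc=0.02, tau_ref=0.002)
dt = 0.001
J = np.array([1.5, 2.0, 3.0])          # constant suprathreshold currents
voltage = np.zeros(3)
refractory_time = np.zeros(3)
spiked = np.zeros(3)

counts = np.zeros(3)
for _ in range(1000):                  # 1 s of simulation
    lif.step_math(dt, J, spiked, voltage, refractory_time)
    counts += spiked * dt              # spiked holds 1/dt at spike times

expected = 1.0 / (lif.tau_ref + lif.tau_rc * np.log1p(1.0 / (J - 1.0)))
print(counts, expected)                # spike counts approach the rates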
Example #6
def add_spinnaker_params(config):
    """Add SpiNNaker specific parameters to a configuration object."""
    # Add simulator parameters
    config.configures(Simulator)

    config[Simulator].set_param("placer", CallableParameter(default=par.place))
    config[Simulator].set_param("placer_kwargs", DictParam(default={}))

    config[Simulator].set_param("allocater",
                                CallableParameter(default=par.allocate))
    config[Simulator].set_param("allocater_kwargs", DictParam(default={}))

    config[Simulator].set_param("router", CallableParameter(default=par.route))
    config[Simulator].set_param("router_kwargs", DictParam(default={}))

    config[Simulator].set_param("node_io", Parameter(default=Ethernet))
    config[Simulator].set_param("node_io_kwargs", DictParam(default={}))

    # Add function_of_time parameters to Nodes
    config[nengo.Node].set_param("function_of_time", BoolParam(default=False))
    config[nengo.Node].set_param("function_of_time_period",
                                 NumberParam(default=None, optional=True))

    # Add multiple-core options to Nodes
    config[nengo.Node].set_param(
        "n_cores_per_chip",
        IntParam(default=None, low=1, high=16, optional=True))
    config[nengo.Node].set_param("n_chips",
                                 IntParam(default=None, low=1, optional=True))
    # Add optimisation control parameters to (passthrough) Nodes. None means
    # that a heuristic will be used to determine if the passthrough Node should
    # be removed.
    config[nengo.Node].set_param("optimize_out",
                                 BoolParam(default=None, optional=True))

    # Add profiling parameters to Ensembles
    config[nengo.Ensemble].set_param("profile", BoolParam(default=False))
    config[nengo.Ensemble].set_param("profile_num_samples",
                                     NumberParam(default=None, optional=True))
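
A configuration sketch (illustrative, assuming nengo and the function above in a nengo_spinnaker-style module): once the parameters are registered, they can be set per object through the network's config.

import nengo

with nengo.Network() as net:
    stim = nengo.Node(lambda t: 0.5)
    ens = nengo.Ensemble(100, dimensions=1)
    nengo.Connection(stim, ens)

add_spinnaker_params(net.config)
net.config[stim].function_of_time = True  # Node output depends only on t
net.config[ens].profile = True            # enable profiling for this Ensemble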
Example #7
class BlockConjgrad(LeastSquaresSolver):
    """Solve a multiple-RHS least-squares system using block conj. gradient."""

    tol = NumberParam('tol', low=0)
    X0 = NdarrayParam('X0', shape=('*', '*'), optional=True)

    def __init__(self, tol=1e-2, X0=None):
        super(BlockConjgrad, self).__init__()
        self.tol = tol
        self.X0 = X0

    def __call__(self, A, Y, sigma, rng=None):
        Y, m, n, d, matrix_in = format_system(A, Y)
        sigma = np.asarray(sigma, dtype='float')
        sigma = sigma.reshape(sigma.size, 1)

        X = np.zeros((n, d)) if self.X0 is None else np.array(self.X0)
        if X.shape != (n, d):
            raise ValidationError("Must be shape %s, got %s" %
                                  ((n, d), X.shape),
                                  attr='X0',
                                  obj=self)

        damp = m * sigma**2
        rtol = self.tol * np.sqrt(m)
        G = lambda x: np.dot(A.T, np.dot(A, x)) + damp * x
        B = np.dot(A.T, Y)

        # --- conjugate gradient
        R = B - G(X)
        P = np.array(R)
        Rsold = np.dot(R.T, R)
        AP = np.zeros((n, d))

        maxiters = int(n / d)
        for i in range(maxiters):
            AP = G(P)
            alpha = np.linalg.solve(np.dot(P.T, AP), Rsold)
            X += np.dot(P, alpha)
            R -= np.dot(AP, alpha)

            Rsnew = np.dot(R.T, R)
            if (np.diag(Rsnew) < rtol**2).all():
                break

            beta = np.linalg.solve(Rsold, Rsnew)
            P = R + np.dot(P, beta)
            Rsold = Rsnew

        info = {'rmses': rmses(A, X, Y), 'iterations': i + 1}
        return X if matrix_in else X.ravel(), info
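
A direct-call sketch (assuming the class above together with its module's format_system and rmses helpers, cf. nengo.utils.least_squares_solvers):

import numpy as np

rng = np.random.RandomState(0)
A = rng.uniform(-1, 1, size=(500, 50))   # m x n activity matrix
Y = rng.uniform(-1, 1, size=(500, 2))    # m x d targets (multiple RHS)

X, info = BlockConjgrad(tol=1e-4)(A, Y, sigma=0.01)
print(X.shape, info['iterations'], info['rmses'])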
Example #8
class STDP(nengo.learning_rules.LearningRuleType):
    """Simplified Spike-timing dependent plasticity rule."""

    # Used by other Nengo objects
    modifies = 'weights'
    probeable = ('pre_trace', 'post_trace', "delta")

    # Parameters

    pre_tau = NumberParam('pre_tau', low=0, low_open=True)
    post_tau = NumberParam('post_tau', low=0, low_open=True)
    alf_p = NumberParam('alf_p', low=0, low_open=True)
    alf_n = NumberParam('alf_n', low=0, low_open=True)
    beta_p = NumberParam('beta_p', low=0, low_open=True)
    beta_n = NumberParam('beta_n', low=0, low_open=True)
    max_weight = NumberParam('max_weight')
    min_weight = NumberParam('min_weight')
    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=15e-3)

    def __init__(
        self,
        alf_p=0.05,
        alf_n=0.0001,
        beta_p=1.5,
        beta_n=0.5,
        max_weight=1.0,
        min_weight=0.0001,
        pre_tau=0.0168,
        post_tau=0.0337,
        learning_rate=Default,
    ):
        self.pre_tau = pre_tau
        self.post_tau = post_tau
        self.alf_p = alf_p
        self.alf_n = alf_n
        self.beta_p = beta_p
        self.beta_n = beta_n
        self.max_weight = max_weight
        self.min_weight = min_weight
        super().__init__(learning_rate)
Example #9
class Lowpass(LinearFilter):
    """Standard first-order lowpass filter synapse.

    The impulse-response function is given by::

        f(t) = (1 / tau) * exp(-t / tau)

    Parameters
    ----------
    tau : float
        The time constant of the filter in seconds.

    Attributes
    ----------
    tau : float
        The time constant of the filter in seconds.
    """
    tau = NumberParam('tau', low=0)

    def __init__(self, tau, **kwargs):
        super(Lowpass, self).__init__([1], [tau, 1], **kwargs)
        self.tau = tau

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.tau)

    def make_step(self,
                  shape_in,
                  shape_out,
                  dt,
                  rng,
                  y0=None,
                  dtype=np.float64,
                  **kwargs):
        """Returns an optimized `.LinearFilter.Step` subclass."""
        # if tau < 0.03 * dt, exp(-dt / tau) < 1e-14, so just make it zero
        if self.tau <= .03 * dt:
            return self._make_zero_step(shape_in,
                                        shape_out,
                                        dt,
                                        rng,
                                        y0=y0,
                                        dtype=dtype)
        return super(Lowpass, self).make_step(shape_in,
                                              shape_out,
                                              dt,
                                              rng,
                                              y0=y0,
                                              dtype=dtype,
                                              **kwargs)
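
A quick check of the corrected impulse response (a sketch, assuming nengo.Lowpass): filtering a unit-area impulse should approximate (1 / tau) * exp(-t / tau) when dt is much smaller than tau.

import numpy as np
import nengo

dt, tau = 0.001, 0.05
impulse = np.zeros(200)
impulse[0] = 1.0 / dt                 # unit-area discrete impulse

y = nengo.Lowpass(tau).filt(impulse, dt=dt, y0=0)
t = dt * np.arange(200)
expected = (1.0 / tau) * np.exp(-t / tau)
print(np.max(np.abs(y - expected)))   # small relative to 1 / tau = 20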
Example #10
File: neurons.py Project: hunse/phd
class IF(IFRate):
    """Spiking version of the integrate-and-fire (IF) neuron model.

    Parameters
    ----------
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    min_voltage : float
        Minimum value for the membrane voltage. If ``-np.inf``, the voltage
        is never clipped.
    """

    probeable = ('spikes', 'voltage', 'refractory_time')

    min_voltage = NumberParam('min_voltage', high=0)

    def __init__(self, tau_ref=0., amplitude=1., min_voltage=0):
        super(IF, self).__init__(tau_ref=tau_ref, amplitude=amplitude)
        self.min_voltage = min_voltage

    def step_math(self, dt, J, spiked, voltage, refractory_time):
        # reduce all refractory times by dt
        refractory_time -= dt

        # compute effective dt for each neuron, based on remaining time.
        # note that refractory times that have completed midway into this
        # timestep will be given a partial timestep, and moreover these will
        # be subtracted to zero at the next timestep (or reset by a spike)
        delta_t = (dt - refractory_time).clip(0, dt)

        # update voltage by integrating
        voltage += delta_t * J

        # determine which neurons spiked (set them to 1/dt, else 0)
        spiked_mask = voltage > 1
        spiked[:] = spiked_mask * (self.amplitude / dt)

        # set v(0) = 1 and solve for t to compute the spike time
        t_spike = dt - (voltage[spiked_mask] - 1) / J[spiked_mask]

        # set spiked neuron refractory times to tau_ref
        refractory_time[spiked_mask] = self.tau_ref + t_spike

        # set spiked neuron voltages to zero, unless the ref period is small
        delta_t2 = (dt - refractory_time[spiked_mask]).clip(0, dt)
        voltage[spiked_mask] = delta_t2 * J[spiked_mask]

        # rectify negative voltages to a floor of min_voltage
        voltage[voltage < self.min_voltage] = self.min_voltage
Example #11
class stpLIF(LIF):
    probeable = ('spikes', 'resources', 'voltage', 'refractory_time',
                 'calcium')

    tau_x = NumberParam('tau_x', low=0, low_open=True)
    tau_u = NumberParam('tau_u', low=0, low_open=True)
    U = NumberParam('U', low=0, low_open=True)

    def __init__(self, tau_x=0.2, tau_u=1.5, U=0.2, **lif_args):
        super(stpLIF, self).__init__(**lif_args)
        self.tau_x = tau_x
        self.tau_u = tau_u
        self.U = U

    @property
    def _argreprs(self):
        args = super(LIFRate, self)._argreprs
        if self.tau_x != 0.2:
            args.append("tau_x=%s" % self.tau_x)
        if self.tau_u != 1.5:
            args.append("tau_u=%s" % self.tau_u)
        if self.U != 0.2:
            args.append("U=%s" % self.U)
        return args

    def step_math(self, dt, J, output, voltage, ref, resources, calcium):
        """Implement the u and x parameters """
        x = resources
        u = calcium
        LIF.step_math(self, dt, J, output, voltage, ref)

        # calculate u and x
        dx = dt * ((1 - x) / self.tau_x - u * x * output)
        du = dt * ((self.U - u) / self.tau_u + self.U * (1 - u) * output)

        x += dx
        u += du
Example #12
class Voja(LearningRuleType):
    """Vector Oja learning rule.

    Modifies an ensemble's encoders to be selective to its inputs.

    A connection to the learning rule will provide a scalar weight for the
    learning rate, minus 1. For instance, 0 is normal learning, -1 is no
    learning, and less than -1 causes anti-learning or "forgetting".

    Parameters
    ----------
    post_tau : float, optional
        Filter constant on activities of neurons in post population.
    learning_rate : float, optional
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`, optional
        Synapse model used to filter the post-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    """

    modifies = 'encoders'
    probeable = ('post_filtered', 'scaled_encoders', 'delta')

    learning_rate = NumberParam(
        'learning_rate', low=0, readonly=True, default=1e-2)
    post_synapse = SynapseParam(
        'post_synapse', default=Lowpass(tau=0.005), readonly=True)

    post_tau = _deprecated_tau("post_tau", "post_synapse")

    def __init__(self, learning_rate=Default, post_synapse=Default,
                 post_tau=Unconfigurable):
        super().__init__(learning_rate, size_in=1)

        if post_tau is Unconfigurable:
            self.post_synapse = post_synapse
        else:
            self.post_tau = post_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', Voja.learning_rate.default),
                ('post_synapse', Voja.post_synapse.default))
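
A usage sketch for the scalar learning control described in the docstring (standard nengo usage, assuming nengo.Voja):

import nengo

with nengo.Network() as model:
    stim = nengo.Node([0.5, -0.5])
    ens = nengo.Ensemble(50, dimensions=2)
    conn = nengo.Connection(stim, ens, learning_rule_type=nengo.Voja())

    # scalar input to the rule: 0 = normal learning, -1 = learning off
    control = nengo.Node(0)
    nengo.Connection(control, conn.learning_rule, synapse=None)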
Example #13
class Tanh(NeuronType):
    """A non-spiking neuron model whose response curve is a hyperbolic tangent.

    Parameters
    ----------
    tau_ref : float
        The neuron refractory period, in seconds. The maximum firing rate of the
        neurons is ``1 / tau_ref``. Must be positive (i.e. ``tau_ref > 0``).
    initial_state : {str: Distribution or array_like}
        Mapping from state variables names to their desired initial value.
        These values will override the defaults set in the class's state attribute.
    """

    state = {"rates": Choice([0])}

    tau_ref = NumberParam("tau_ref", low=0, low_open=True)

    def __init__(self, tau_ref=0.0025, initial_state=None):
        super().__init__(initial_state)
        self.tau_ref = tau_ref

    def gain_bias(self, max_rates, intercepts):
        """Analytically determine gain, bias."""
        max_rates = np.array(max_rates, dtype=float, copy=False, ndmin=1)
        intercepts = np.array(intercepts, dtype=float, copy=False, ndmin=1)

        inv_tau_ref = 1.0 / self.tau_ref
        if not np.all(max_rates < inv_tau_ref):
            raise ValidationError(
                "Max rates must be below the inverse refractory period (%0.3f)"
                % inv_tau_ref,
                attr="max_rates",
                obj=self,
            )

        inverse = np.arctanh(max_rates * self.tau_ref)
        gain = inverse / (1.0 - intercepts)
        bias = -gain * intercepts
        return gain, bias

    def max_rates_intercepts(self, gain, bias):
        """Compute the inverse of gain_bias."""
        intercepts = -bias / gain
        max_rates = (1.0 / self.tau_ref) * np.tanh(gain + bias)
        return max_rates, intercepts

    def step(self, dt, J, rates):
        """Implement the tanh nonlinearity."""
        rates[...] = (1.0 / self.tau_ref) * np.tanh(J)
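
A round-trip check (a sketch, assuming the Tanh class above): max_rates_intercepts should invert gain_bias whenever the requested rates stay below 1 / tau_ref.

import numpy as np

tanh = Tanh(tau_ref=0.0025)            # max possible rate: 400 Hz
max_rates = np.array([200.0, 300.0])
intercepts = np.array([-0.5, 0.0])

gain, bias = tanh.gain_bias(max_rates, intercepts)
mr, icpt = tanh.max_rates_intercepts(gain, bias)
assert np.allclose(mr, max_rates) and np.allclose(icpt, intercepts)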
Example #14
class LIF(LIFRate):
    """Spiking version of the leaky integrate-and-fire (LIF) neuron model.

    Parameters
    ----------
    tau_rc : float
        Membrane RC time constant, in seconds. Affects how quickly the membrane
        voltage decays to zero in the absence of input (larger = slower decay).
    tau_ref : float
        Absolute refractory period, in seconds. This is how long the
        membrane voltage is held at zero after a spike.
    min_voltage : float
        Minimum value for the membrane voltage. If ``-np.inf``, the voltage
        is never clipped.
    """

    probeable = ('spikes', 'voltage', 'refractory_time')

    min_voltage = NumberParam('min_voltage', high=0)

    def __init__(self, tau_rc=0.02, tau_ref=0.002, min_voltage=0):
        super(LIF, self).__init__(tau_rc=tau_rc, tau_ref=tau_ref)
        self.min_voltage = min_voltage

    def step_math(self, dt, J, spiked, voltage, refractory_time):
        """Implement the LIF nonlinearity."""

        # update voltage using accurate exponential integration scheme
        dV = -np.expm1(-dt / self.tau_rc) * (J - voltage)
        voltage += dV
        voltage[voltage < self.min_voltage] = self.min_voltage

        # update refractory period assuming no spikes for now
        refractory_time -= dt

        # set voltages of neurons still in their refractory period to 0
        # and reduce voltage of neurons partway out of their ref. period
        voltage *= (1 - refractory_time / dt).clip(0, 1)

        # determine which neurons spike (if v > 1 set spiked = 1/dt, else 0)
        spiked[:] = (voltage > 1) / dt

        # linearly approximate time since neuron crossed spike threshold
        overshoot = (voltage[spiked > 0] - 1) / dV[spiked > 0]
        spiketime = dt * (1 - overshoot)

        # set spiking neurons' voltages to zero, and ref. time to tau_ref
        voltage[spiked > 0] = 0
        refractory_time[spiked > 0] = self.tau_ref + spiketime
Example #15
class PES(LearningRuleType):
    """Prescribed Error Sensitivity learning rule.

    Modifies a connection's decoders to minimize an error signal provided
    through a connection to the connection's learning rule.

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-4)
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional \
                  (Default: ``nengo.synapses.Lowpass(tau=0.005)``)
        Synapse model used to filter the pre-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = 'decoders'
    probeable = ('error', 'correction', 'activities', 'delta')

    learning_rate = NumberParam(
        'learning_rate', low=0, readonly=True, default=1e-4)
    pre_synapse = SynapseParam(
        'pre_synapse', default=Lowpass(tau=0.005), readonly=True)

    pre_tau = _deprecated_tau("pre_tau", "pre_synapse")

    def __init__(self, learning_rate=Default, pre_synapse=Default,
                 pre_tau=Unconfigurable):
        super(PES, self).__init__(learning_rate, size_in='post_state')
        if learning_rate is not Default and learning_rate >= 1.0:
            warnings.warn("This learning rate is very high, and can result "
                          "in floating point errors from too much current.")

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', PES.learning_rate.default),
                ('pre_synapse', PES.pre_synapse.default))
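
The canonical error-minimization loop (a sketch of standard nengo usage): the error population computes post minus target and feeds the rule, which adjusts the decoders of conn.

import numpy as np
import nengo

with nengo.Network() as model:
    stim = nengo.Node(lambda t: np.sin(2 * np.pi * t))
    pre = nengo.Ensemble(60, dimensions=1)
    post = nengo.Ensemble(60, dimensions=1)
    error = nengo.Ensemble(60, dimensions=1)

    nengo.Connection(stim, pre)
    conn = nengo.Connection(pre, post,
                            learning_rule_type=nengo.PES(learning_rate=1e-4))

    nengo.Connection(post, error)                # error = post - target
    nengo.Connection(stim, error, transform=-1)
    nengo.Connection(error, conn.learning_rule)  # drives the decoder update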
Example #16
class LstsqL2(Solver):
    """Least-squares solver with L2 regularization."""

    reg = NumberParam("reg", low=0)
    solver = LeastSquaresSolverParam("solver")

    def __init__(self, weights=False, reg=0.1, solver=lstsq.Cholesky()):
        super().__init__(weights=weights)
        self.reg = reg
        self.solver = solver

    def __call__(self, A, Y, rng=np.random):
        tstart = time.time()
        sigma = self.reg * A.max()
        X, info = self.solver(A, Y, sigma, rng=rng)
        info["time"] = time.time() - tstart
        return X, info
Example #17
class Process(object):
    """A general system with input, output, and state.

    Attributes
    ----------
    default_size_out : int
        If `d` isn't specified in `run` or `run_steps`, this will be used.
        Default: 1.
    default_dt : float
        If `dt` isn't specified in `run`, `run_steps`, `ntrange`, or `trange`,
        this will be used. Default: 0.001 (1 millisecond).
    """
    default_size_out = IntParam(low=0)
    default_dt = NumberParam(low=0, low_open=True)

    def __init__(self):
        self.default_size_out = 1
        self.default_dt = 0.001

    def make_step(self, size_in, size_out, dt, rng):
        raise NotImplementedError("Process must implement `make_step` method.")

    def run_steps(self, n_steps, d=None, dt=None, rng=np.random):
        # TODO: allow running with input
        d = self.default_size_out if d is None else d
        dt = self.default_dt if dt is None else dt
        step = self.make_step(0, d, dt, rng)
        output = np.zeros((n_steps, d))
        for i in range(n_steps):
            output[i] = step(i * dt)
        return output

    def run(self, t, d=None, dt=None, rng=np.random):
        # TODO: allow running with input
        dt = self.default_dt if dt is None else dt
        n_steps = int(np.round(float(t) / dt))
        return self.run_steps(n_steps, d=d, dt=dt, rng=rng)

    def ntrange(self, n_steps, dt=None):
        dt = self.default_dt if dt is None else dt
        return dt * np.arange(1, n_steps + 1)

    def trange(self, t, dt=None):
        dt = self.default_dt if dt is None else dt
        n_steps = int(np.round(float(t) / dt))
        return self.ntrange(n_steps, dt=dt)
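
A minimal subclass sketch (SampledNoise is a hypothetical name, assuming the Process class above): implementing make_step is all that's needed to use run and run_steps.

import numpy as np

class SampledNoise(Process):
    def make_step(self, size_in, size_out, dt, rng):
        def step(t):
            return rng.normal(size=size_out)  # one sample per output dim
        return step

proc = SampledNoise()
out = proc.run_steps(5, d=2)    # shape (5, 2), one row per timestep
times = proc.ntrange(5)         # [0.001, 0.002, ..., 0.005]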
Example #18
File: neurons.py Project: jepetolee/nengo
class RatesToSpikesNeuronType(NeuronType):
    """Base class for neuron types that turn rate types into spiking ones."""

    base_type = NeuronTypeParam("base_type")
    amplitude = NumberParam("amplitude", low=0, low_open=True)
    spiking = True

    def __init__(self, base_type, amplitude=1.0, initial_state=None):
        super().__init__(initial_state)

        self.base_type = base_type
        self.amplitude = amplitude
        self.negative = base_type.negative

        if base_type.spiking:
            warnings.warn(
                f"'base_type' is type '{type(base_type).__name__}', which is a spiking "
                "neuron type. We recommend using the non-spiking equivalent type, "
                "if one exists.")

        for s in self.state:
            if s in self.base_type.state:
                raise ValidationError(
                    f"{self} and {self.base_type} have an overlapping "
                    f"state variable ({s})",
                    attr="state",
                    obj=self,
                )

    def gain_bias(self, max_rates, intercepts):
        return self.base_type.gain_bias(max_rates, intercepts)

    def max_rates_intercepts(self, gain, bias):
        return self.base_type.max_rates_intercepts(gain, bias)

    def rates(self, x, gain, bias):
        return self.base_type.rates(x, gain, bias)

    def step(self, dt, J, output, **state):
        raise NotImplementedError("Subclasses must implement step")

    @property
    def probeable(self):
        return ("output", "rate_out") + tuple(self.state) + tuple(
            self.base_type.state)
Example #19
class PES(LearningRuleType):
    """Prescribed Error Sensitivity Learning Rule

    Modifies a connection's decoders to minimize an error signal.

    Parameters
    ----------
    pre_tau : float, optional
        Filter constant on activities of neurons in pre population.
        Defaults to 0.005.
    learning_rate : float, optional
        A scalar indicating the rate at which decoders will be adjusted.
        Defaults to 1e-4.

    Attributes
    ----------
    pre_tau : float
        Filter constant on activities of neurons in pre population.
    learning_rate : float
        The given learning rate.
    error_connection : Connection
        The modulatory connection created to project the error signal.
    """

    pre_tau = NumberParam('pre_tau', low=0, low_open=True)

    error_type = 'decoded'
    modifies = 'decoders'
    probeable = ['error', 'correction', 'activities', 'delta']

    def __init__(self, learning_rate=1e-4, pre_tau=0.005):
        if learning_rate >= 1.0:
            warnings.warn("This learning rate is very high, and can result "
                          "in floating point errors from too much current.")
        self.pre_tau = pre_tau
        super(PES, self).__init__(learning_rate)

    @property
    def _argreprs(self):
        args = []
        if self.learning_rate != 1e-4:
            args.append("learning_rate=%g" % self.learning_rate)
        if self.pre_tau != 0.005:
            args.append("pre_tau=%f" % self.pre_tau)
        return args
Example #20
class LstsqNoise(Solver):
    """Least-squares solver with additive Gaussian white noise."""

    noise = NumberParam("noise", low=0)
    solver = LeastSquaresSolverParam("solver")

    def __init__(self, weights=False, noise=0.1, solver=lstsq.Cholesky()):
        super().__init__(weights=weights)
        self.noise = noise
        self.solver = solver

    def __call__(self, A, Y, rng=np.random):
        tstart = time.time()
        sigma = self.noise * np.amax(np.abs(A))
        A = A + rng.normal(scale=sigma, size=A.shape)
        X, info = self.solver(A, Y, 0, rng=rng)
        info["time"] = time.time() - tstart
        return X, info
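
Both this solver and LstsqL2 (Example #16) can be called directly on an activity matrix; a comparison sketch (assuming both live in nengo.solvers):

import numpy as np
from nengo.solvers import LstsqL2, LstsqNoise

rng = np.random.RandomState(0)
A = rng.uniform(0, 1, size=(200, 20))    # activities
Y = rng.uniform(-1, 1, size=(200, 1))    # targets

X_l2, info_l2 = LstsqL2(reg=0.1)(A, Y, rng=rng)
X_noise, info_noise = LstsqNoise(noise=0.1)(A, Y, rng=rng)
print(X_l2.shape, info_l2['time'], info_noise['time'])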
Example #21
class LearningRuleType(object):
    """Base class for all learning rule objects.

    To use a learning rule, pass it as a ``learning_rule`` keyword argument to
    the Connection on which you want to do learning.
    """

    learning_rate = NumberParam(low=0, low_open=True)
    probeable = []

    def __init__(self, learning_rate=1e-6):
        if learning_rate >= 1.0:
            warnings.warn("This learning rate is very high, and can result "
                          "in floating point errors from too much current.")
        self.learning_rate = learning_rate

    def __str__(self):
        return self.__class__.__name__
Example #22
class RectifiedLinear(NeuronType):
    """A rectified linear neuron model.

    Each neuron is modeled as a rectified line. That is, the neuron's activity
    scales linearly with current, unless it passes below zero, at which point
    the neural activity will stay at zero.

    Parameters
    ----------
    amplitude : float
        Scaling factor on the neuron output. Corresponds to the relative
        amplitude of the output of the neuron.
    initial_state : {str: Distribution or array_like}
        Mapping from state variables names to their desired initial value.
        These values will override the defaults set in the class's state attribute.
    """

    state = {"rates": Choice([0])}
    negative = False

    amplitude = NumberParam("amplitude", low=0, low_open=True)

    def __init__(self, amplitude=1, initial_state=None):
        super().__init__(initial_state)

        self.amplitude = amplitude

    def gain_bias(self, max_rates, intercepts):
        """Determine gain and bias by shifting and scaling the lines."""
        max_rates = np.array(max_rates, dtype=float, copy=False, ndmin=1)
        intercepts = np.array(intercepts, dtype=float, copy=False, ndmin=1)
        gain = max_rates / (1 - intercepts)
        bias = -intercepts * gain
        return gain, bias

    def max_rates_intercepts(self, gain, bias):
        """Compute the inverse of gain_bias."""
        intercepts = -bias / gain
        max_rates = gain * (1 - intercepts)
        return max_rates, intercepts

    def step(self, dt, J, rates):
        """Implement the rectification nonlinearity."""
        rates[...] = self.amplitude * np.maximum(0.0, J)
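
A spot check of gain_bias (a sketch, assuming the class above): driving the neuron at x = 1 should yield the requested maximum rate when amplitude is 1.

import numpy as np

relu = RectifiedLinear()
gain, bias = relu.gain_bias(np.array([100.0]), np.array([-0.2]))

rates = np.zeros(1)
relu.step(0.001, gain * 1.0 + bias, rates)  # input current at x = 1
print(rates)                                # [100.0]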
Example #23
class Voja(LearningRuleType):
    """Vector Oja learning rule.

    Modifies an ensemble's encoders to be selective to its inputs.

    A connection to the learning rule will provide a scalar weight for the
    learning rate, minus 1. For instance, 0 is normal learning, -1 is no
    learning, and less than -1 causes anti-learning or "forgetting".

    Parameters
    ----------
    post_tau : float, optional (Default: 0.005)
        Filter constant on activities of neurons in post population.
    learning_rate : float, optional (Default: 1e-2)
        A scalar indicating the rate at which encoders will be adjusted.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which encoders will be adjusted.
    post_tau : float
        Filter constant on activities of neurons in post population.
    """

    error_type = 'scalar'
    modifies = 'encoders'
    probeable = ('post_filtered', 'scaled_encoders', 'delta')

    post_tau = NumberParam('post_tau', low=0, low_open=True, optional=True)

    def __init__(self, post_tau=0.005, learning_rate=1e-2):
        self.post_tau = post_tau
        super(Voja, self).__init__(learning_rate)

    @property
    def _argreprs(self):
        args = []
        if self.post_tau is None:
            args.append("post_tau=%s" % self.post_tau)
        elif self.post_tau != 0.005:
            args.append("post_tau=%g" % self.post_tau)
        if self.learning_rate != 1e-2:
            args.append("learning_rate=%g" % self.learning_rate)
        return args
Example #24
class NnlsL2(Nnls):
    """Non-negative least-squares solver with L2 regularization.

    Similar to `.LstsqL2`, except the output values are non-negative.
    """

    reg = NumberParam('reg', low=0)

    def __init__(self, weights=False, reg=0.1):
        """
        .. note:: Requires
                  `SciPy <http://docs.scipy.org/doc/scipy/reference/>`_.

        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.
        reg : float, optional (Default: 0.1)
            Amount of regularization, as a fraction of the neuron activity.

        Attributes
        ----------
        reg : float
            Amount of regularization, as a fraction of the neuron activity.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        super(NnlsL2, self).__init__(weights=weights)
        self.reg = reg

    def _solve(self, A, Y, rng, E, sigma):
        tstart = time.time()
        # form Gram matrix so we can add regularization
        GA = np.dot(A.T, A)
        GY = np.dot(A.T, Y)
        np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2)
        X, info = super(NnlsL2, self).__call__(GA, GY, rng=rng, E=E)
        t = time.time() - tstart
        # recompute the RMSE in terms of the original matrices
        info = {'rmses': rmses(A, X, Y), 'gram_info': info, 'time': t}
        return X, info

    def __call__(self, A, Y, rng=None, E=None):
        return self._solve(A, Y, rng, E, sigma=self.reg * A.max())
Example #25
class Alpha(LinearFilter):
    """Alpha-function filter synapse.

    The impulse-response function is given by::

        alpha(t) = (t / tau) * exp(-t / tau)

    and was found by [1]_ to be a good basic model for synapses.

    Parameters
    ----------
    tau : float
        The time constant of the filter in seconds.

    Attributes
    ----------
    tau : float
        The time constant of the filter in seconds.

    References
    ----------
    .. [1] Mainen, Z.F. and Sejnowski, T.J. (1995). Reliability of spike timing
       in neocortical neurons. Science (New York, NY), 268(5216):1503-6.
    """

    tau = NumberParam('tau', low=0)

    def __init__(self, tau, **kwargs):
        super(Alpha, self).__init__([1], [tau**2, 2*tau, 1], **kwargs)
        self.tau = tau

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.tau)

    def make_step(self, shape_in, shape_out, dt, rng, y0=None,
                  dtype=np.float64, **kwargs):
        """Returns an optimized `.LinearFilter.Step` subclass."""
        # if tau < 0.03 * dt, exp(-dt / tau) < 1e-14, so just make it zero
        if self.tau <= .03 * dt:
            return self._make_zero_step(
                shape_in, shape_out, dt, rng, y0=y0, dtype=dtype)
        return super(Alpha, self).make_step(
            shape_in, shape_out, dt, rng, y0=y0, dtype=dtype, **kwargs)
Example #26
class Concatenate(Distribution):
    """Concatenate distributions to form an independent multivariate"""

    distributions = TupleParam('distributions', readonly=True)
    d = NumberParam('d', low=1, readonly=True)

    def __init__(self, distributions):
        super(Concatenate, self).__init__()
        self.distributions = distributions

        # --- determine dimensionality
        rng = np.random.RandomState(0)
        s = np.column_stack([d.sample(1, rng=rng) for d in self.distributions])
        self.d = s.shape[1]

    def sample(self, n, d=None, rng=np.random):
        assert d is None or d == self.d
        return np.column_stack(
            [dist.sample(n, rng=rng) for dist in self.distributions])
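
A usage sketch (assuming nengo.dists.Uniform and the class above): each component distribution must yield a fixed number of columns when sampled without d.

import numpy as np
from nengo.dists import Uniform

dist = Concatenate([Uniform(0, 1), Uniform(-1, 1)])
print(dist.d)                                            # 2
samples = dist.sample(4, rng=np.random.RandomState(0))   # shape (4, 2)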
Example #27
class PES(LearningRuleType):
    """Prescribed Error Sensitivity learning rule.

    Modifies a connection's decoders to minimize an error signal provided
    through a connection to the connection's learning rule.

    Parameters
    ----------
    learning_rate : float, optional
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional
        Synapse model used to filter the pre-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = "decoders"
    probeable = ("error", "activities", "delta")

    learning_rate = NumberParam("learning_rate", low=0, readonly=True, default=1e-4)
    pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.005), readonly=True)

    def __init__(self, learning_rate=Default, pre_synapse=Default):
        super().__init__(learning_rate, size_in="post_state")
        if learning_rate is not Default and learning_rate >= 1.0:
            warnings.warn(
                "This learning rate is very high, and can result "
                "in floating point errors from too much current."
            )

        self.pre_synapse = pre_synapse

    @property
    def _argdefaults(self):
        return (
            ("learning_rate", PES.learning_rate.default),
            ("pre_synapse", PES.pre_synapse.default),
        )
Example #28
class RatesToSpikesNeuronType(NeuronType):
    """Base class for neuron types that turn rate types into spiking ones."""

    base_type = NeuronTypeParam("base_type")
    amplitude = NumberParam("amplitude", low=0, low_open=True)
    spiking = True

    def __init__(self, base_type, amplitude=1.0, initial_state=None):
        super().__init__(initial_state)

        self.base_type = base_type
        self.amplitude = amplitude
        self.negative = base_type.negative

        if base_type.spiking:
            warnings.warn(
                "'base_type' is type %r, which is a spiking neuron type. We recommend "
                "using the non-spiking equivalent type, if one exists."
                % (type(base_type).__name__)
            )

        for s in self.state:
            if s in self.base_type.state:
                raise ValidationError(
                    "%s and %s have overlapping state variable (%s)"
                    % (self, self.base_type, s),
                    attr="state",
                    obj=self,
                )

    def gain_bias(self, max_rates, intercepts):
        return self.base_type.gain_bias(max_rates, intercepts)

    def max_rates_intercepts(self, gain, bias):
        return self.base_type.max_rates_intercepts(gain, bias)

    def rates(self, x, gain, bias):
        return self.base_type.rates(x, gain, bias)

    @property
    def probeable(self):
        return ("output", "rate_out") + tuple(self.state) + tuple(self.base_type.state)
Example #29
class PES(LearningRuleType):
    """Prescribed Error Sensitivity learning rule.

    Modifies a connection's decoders to minimize an error signal provided
    through a connection to the connection's learning rule.

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-4)
        A scalar indicating the rate at which weights will be adjusted.
    pre_tau : float, optional (Default: 0.005)
        Filter constant on activities of neurons in pre population.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    pre_tau : float
        Filter constant on activities of neurons in pre population.
    """

    error_type = 'decoded'
    modifies = 'decoders'
    probeable = ('error', 'correction', 'activities', 'delta')

    pre_tau = NumberParam('pre_tau', low=0, low_open=True)

    def __init__(self, learning_rate=1e-4, pre_tau=0.005):
        if learning_rate >= 1.0:
            warnings.warn("This learning rate is very high, and can result "
                          "in floating point errors from too much current.")
        self.pre_tau = pre_tau
        super(PES, self).__init__(learning_rate)

    @property
    def _argreprs(self):
        args = []
        if self.learning_rate != 1e-4:
            args.append("learning_rate=%g" % self.learning_rate)
        if self.pre_tau != 0.005:
            args.append("pre_tau=%g" % self.pre_tau)
        return args
Example #30
class Voja(LearningRuleType):
    """Vector Oja learning rule.

    Modifies an ensemble's encoders to be selective to its inputs.

    A connection to the learning rule will provide a scalar weight for the
    learning rate, minus 1. For instance, 0 is normal learning, -1 is no
    learning, and less than -1 causes anti-learning or "forgetting".

    Parameters
    ----------
    post_tau : float, optional
        Filter constant on activities of neurons in post population.
    learning_rate : float, optional
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`, optional
        Synapse model used to filter the post-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    """

    modifies = "encoders"
    probeable = ("post_filtered", "scaled_encoders", "delta")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-2)
    post_synapse = SynapseParam("post_synapse",
                                default=Lowpass(tau=0.005),
                                readonly=True)

    def __init__(self, learning_rate=Default, post_synapse=Default):
        super().__init__(learning_rate, size_in=1)

        self.post_synapse = post_synapse