Example #1
class HSP(LearningRuleType):
    """Hebbian synaptic plasticity learning rule.  Modifies connection weights
    according to the presynaptic and postsynaptic firing rates and the
    target firing rate.

    """
    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')
    
    pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.005), readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)
    jit = BoolParam("jit", default=True, readonly=True)
    directed = BoolParam("directed", default=False, readonly=True)

    def __init__(self,
                 pre_synapse=Default,
                 post_synapse=Default,
                 directed=Default,
                 jit=Default):
        super().__init__(size_in=1)
        self.pre_synapse = pre_synapse
        self.post_synapse = (
            self.pre_synapse if post_synapse is Default else post_synapse
        )
        self.jit = jit
        self.directed = directed

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs, self.pre_synapse)
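A minimal usage sketch (not part of the original example): it assumes the HSP class defined above is in scope and that a corresponding build function has been registered with the backend, and it attaches the rule to a neuron-to-neuron connection.

import numpy as np
import nengo

with nengo.Network() as model:
    pre = nengo.Ensemble(50, dimensions=1)
    post = nengo.Ensemble(50, dimensions=1)

    # Rules that modify 'weights' act on a full weight matrix, so connect
    # neurons to neurons with an explicit (here zero-initialized) transform.
    conn = nengo.Connection(
        pre.neurons, post.neurons,
        transform=np.zeros((post.n_neurons, pre.n_neurons)),
        learning_rule_type=HSP(directed=True),
    )

    # size_in=1 means the rule expects a one-dimensional input signal,
    # which can be provided on conn.learning_rule.
    nengo.Connection(nengo.Node(1.0), conn.learning_rule)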
Example #2
class _LstsqNoiseSolver(Solver):
    """Base class for least-squares solvers with noise."""

    weights = BoolParam('weights')
    noise = NumberParam('noise', low=0)
    solver = LeastSquaresSolverParam('solver')

    def __init__(self, weights=False, noise=0.1, solver=lstsq.Cholesky()):
        """
        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.
        noise : float, optional (Default: 0.1)
            Amount of noise, as a fraction of the neuron activity.
        solver : `.LeastSquaresSolver`, optional (Default: ``Cholesky()``)
            Subsolver to use for solving the least squares problem.

        Attributes
        ----------
        noise : float
            Amount of noise, as a fraction of the neuron activity.
        solver : `.LeastSquaresSolver`
            Subsolver to use for solving the least squares problem.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        self.weights = weights
        self.noise = noise
        self.solver = solver
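In Nengo itself the concrete subclass of this base class is nengo.solvers.LstsqNoise; a short sketch of passing it to a connection (values are arbitrary):

import nengo

with nengo.Network() as model:
    a = nengo.Ensemble(100, dimensions=1)
    b = nengo.Ensemble(100, dimensions=1)

    # Solve for decoders, regularizing with noise at 10% of peak activity.
    nengo.Connection(a, b, solver=nengo.solvers.LstsqNoise(noise=0.1))

    # With weights=True the solver returns full connection weights instead.
    nengo.Connection(a, b, solver=nengo.solvers.LstsqNoise(noise=0.1, weights=True))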
Example #3
class QPSolver(ExtendedSolver):

    reg = NumberParam('reg', low=0)
    relax = BoolParam('relax')

    def __init__(self, reg=1e-3, relax=False):
        super().__init__()
        self.reg = reg
        self.relax = relax

    def __call__(self, A, J, connectivity, i_th, tuning, rng=np.random):
        # Neuron model parameters. For now we only support current-based LIF
        if tuning is None:
            ws = np.array((0.0, 1.0, -1.0, 1.0, 0.0, 0.0))
        else:
            ws = tuning

        # Determine the final regularisation parameter
        reg = (self.reg * np.max(A))**2 * A.shape[1]

        # If subthreshold relaxation is switched off, set the spike threshold
        # to "None"
        i_th = None if not self.relax else i_th

        # Use the faster NNLS solver instead of CVXOPT if we do not need
        # current relaxation
        use_lstsq = i_th is None

        return qp_solver.solve(A, J, ws, connectivity,
                               iTh=i_th, reg=reg, use_lstsq=use_lstsq)
Example #4
class CDISP(LearningRuleType):
    """Inhibitory plasticity learning rule.  Modifies connection weights
    according to the presynaptic and postsynaptic firing rates and the
    amplitude of the error signal.

    """
    modifies = 'weights'
    probeable = ('error_filtered', 'pre_filtered', 'post_filtered', 'delta')
    
    learning_rate = NumberParam("learning_rate", low=0, readonly=True, default=1e-6)
    pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.02), readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)
    sigma_synapse = SynapseParam("sigma_synapse", default=Lowpass(tau=0.005), readonly=True)

    jit = BoolParam("jit", default=True, readonly=True)

    def __init__(self,
                 learning_rate=Default,
                 sigma_synapse=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 jit=Default):
        super(CDISP, self).__init__(learning_rate, size_in="post")
        self.sigma_synapse = sigma_synapse
        self.pre_synapse = pre_synapse
        self.post_synapse = (
            self.pre_synapse if post_synapse is Default else post_synapse
        )
        self.jit = jit

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs, self.pre_synapse)
Example #5
class GSP(LearningRuleType):
    """Gating plasticity learning rule.  Modifies connection weights
    according to the postsynaptic firing rates.

    """
    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')
    
    learning_rate = NumberParam("learning_rate", low=0, readonly=True, default=1e-6)
    pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.01), readonly=True)
    post_synapse = SynapseParam("post_synapse", default=Lowpass(tau=0.1), readonly=True)
    jit = BoolParam("jit", default=True, readonly=True)

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 jit=Default):
        super().__init__(learning_rate, size_in=0)
        self.pre_synapse = pre_synapse
        self.post_synapse = (
            self.pre_synapse if post_synapse is Default else post_synapse
        )
        self.jit = jit

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs, self.pre_synapse)
Example #6
def configure_trainable(config, default=None):
    """Adds a configurable attribute called ``trainable`` to trainable objects.

    Used to manually configure whether or not those parts of the model can
    be optimized by :meth:`.Simulator.train`.

    Parameters
    ----------
    config : :class:`~nengo:nengo.config.Config` or \
             :class:`~nengo:nengo.Network`
        the config object to be modified (or a Network to modify
        ``net.config``)
    default : bool, optional
        the default value for ``trainable`` (``None`` means that the value
        is deferred to the parent config, or ``True`` if this is the top-level
        config)
    """

    if isinstance(config, Network):
        config = config.config

    for obj in (Ensemble, Connection, ensemble.Neurons):
        try:
            params = config[obj]
        except ConfigError:
            config.configures(obj)
            params = config[obj]

        params.set_param("trainable",
                         BoolParam("trainable", default, optional=True))
Example #7
class ISP(LearningRuleType):
    """Inhibitory plasticity learning rule.  Modifies connection weights
    according to the presynaptic and postsynaptic firing rates and the
    target firing rate.

    """
    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')
    
    rho0 = NumberParam("rho0", low=0, readonly=True, default=10.)
    pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.01), readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)
    jit = BoolParam("jit", default=True, readonly=True)

    def __init__(self,
                 rho0=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 jit=Default):
        super().__init__(size_in=1)
        self.rho0 = rho0
        self.pre_synapse = pre_synapse
        self.post_synapse = (
            self.pre_synapse if post_synapse is Default else post_synapse
        )
        self.jit = jit

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs, self.pre_synapse)
Example #8
class _LstsqL2Solver(Solver):
    """Base class for L2-regularized least-squares solvers."""

    weights = BoolParam('weights')
    reg = NumberParam('reg', low=0)
    solver = LeastSquaresSolverParam('solver')

    def __init__(self, weights=False, reg=0.1, solver=lstsq.Cholesky()):
        """
        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.
        reg : float, optional (Default: 0.1)
            Amount of regularization, as a fraction of the neuron activity.
        solver : `.LeastSquaresSolver`, optional (Default: ``Cholesky()``)
            Subsolver to use for solving the least squares problem.

        Attributes
        ----------
        reg : float
            Amount of regularization, as a fraction of the neuron activity.
        solver : `.LeastSquaresSolver`
            Subsolver to use for solving the least squares problem.
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        self.weights = weights
        self.reg = reg
        self.solver = solver
Example #9
class Solver(with_metaclass(DocstringInheritor, FrozenObject)):
    """Decoder or weight solver."""

    weights = BoolParam('weights')

    def __init__(self, weights=False):
        super(Solver, self).__init__()
        self.weights = weights

    def __call__(self, A, Y, rng=None, E=None):
        """Call the solver.

        Parameters
        ----------
        A : (n_eval_points, n_neurons) array_like
            Matrix of the neurons' activities at the evaluation points
        Y : (n_eval_points, dimensions) array_like
            Matrix of the target decoded values for each of the D dimensions,
            at each of the evaluation points.
        rng : `numpy.random.RandomState`, optional (Default: None)
            A random number generator to use as required. If None,
            the ``numpy.random`` module functions will be used.
        E : (dimensions, post.n_neurons) array_like, optional (Default: None)
            Array of post-population encoders. Providing this tells the solver
            to return an array of connection weights rather than decoders.

        Returns
        -------
        X :  (n_neurons, dimensions) or (n_neurons, post.n_neurons) ndarray
            (n_neurons, dimensions) array of decoders (if ``solver.weights``
            is False) or (n_neurons, post.n_neurons) array of weights
            (if ``solver.weights`` is True).
        info : dict
            A dictionary of information about the solver. All dictionaries have
            an ``'rmses'`` key that contains RMS errors of the solve.
            Other keys are unique to particular solvers.
        """
        raise NotImplementedError("Solvers must implement '__call__'")

    def mul_encoders(self, Y, E, copy=False):
        """Helper function that projects signal ``Y`` onto encoders ``E``.

        Parameters
        ----------
        Y : ndarray
            The signal of interest.
        E : (dimensions, n_neurons) array_like or None
            Array of encoders. If None, ``Y`` will be returned unchanged.
        copy : bool, optional (Default: False)
            Whether a copy of ``Y`` should be returned if ``E`` is None.
        """
        if self.weights and E is None:
            raise ValidationError(
                "Encoders must be provided for weight solver", attr='E')
        if not self.weights and E is not None:
            raise ValidationError(
                "Encoders must be 'None' for decoder solver", attr='E')

        return np.dot(Y, E) if E is not None else Y.copy() if copy else Y
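To make the __call__ contract concrete, here is a toy solver sketch built on numpy's ordinary least squares; it is not one of Nengo's shipped solvers, but it follows the return convention (X plus an info dict with an 'rmses' key) and reuses mul_encoders from the base class above.

import numpy as np

class PlainLstsq(Solver):
    """Toy example: unregularized least squares following the Solver API."""

    def __init__(self, weights=False):
        super(PlainLstsq, self).__init__(weights=weights)

    def __call__(self, A, Y, rng=None, E=None):
        Y = self.mul_encoders(Y, E)  # project onto encoders when weights=True
        X, _, _, _ = np.linalg.lstsq(A, Y, rcond=None)
        rmses = np.sqrt(np.mean((np.dot(A, X) - Y) ** 2, axis=0))
        return X, {'rmses': rmses}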
Example #10
class FilteredNoise(Process):
    """Filtered white noise process.

    This process takes white noise and filters it using the provided synapse.

    Parameters
    ----------
    synapse : Synapse, optional (Default: ``Lowpass(tau=0.005)``)
        The synapse to use to filter the noise.
    dist : Distribution, optional (Default: ``Gaussian(mean=0, std=1)``)
        The distribution used to generate the white noise.
    scale : bool, optional (Default: True)
        Whether to scale the white noise for integration, making the output
        signal invariant to ``dt``.
    synapse_kwargs : dict, optional (Default: None)
        Arguments to pass to ``synapse.make_step``.
    seed : int, optional (Default: None)
        Random number seed. Ensures noise will be the same each run.
    """

    synapse = SynapseParam('synapse')
    dist = DistributionParam('dist')
    scale = BoolParam('scale')
    synapse_kwargs = DictParam('synapse_kwargs')

    def __init__(self,
                 synapse=Lowpass(tau=0.005),
                 dist=Gaussian(mean=0, std=1),
                 scale=True,
                 synapse_kwargs=None,
                 **kwargs):
        super(FilteredNoise, self).__init__(default_size_in=0, **kwargs)
        self.synapse = synapse
        self.synapse_kwargs = {} if synapse_kwargs is None else synapse_kwargs
        self.dist = dist
        self.scale = scale

    def __repr__(self):
        return "%s(synapse=%r, dist=%r, scale=%r)" % (
            type(self).__name__, self.synapse, self.dist, self.scale)

    def make_step(self, shape_in, shape_out, dt, rng):
        assert shape_in == (0, )
        assert len(shape_out) == 1

        dist = self.dist
        scale = self.scale
        alpha = 1. / np.sqrt(dt)
        filter_step = self.synapse.make_step(shape_out, shape_out, dt, None,
                                             **self.synapse_kwargs)

        def step_filterednoise(t):
            x = dist.sample(n=1, d=shape_out[0], rng=rng)[0]
            if scale:
                x *= alpha
            return filter_step(t, x)

        return step_filterednoise
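Two usage sketches with the public Nengo API: sampling the process directly with Process.run, and using it as the output of a Node (the synapse choice and sizes are arbitrary).

import nengo
from nengo.processes import FilteredNoise
from nengo.synapses import Alpha

# Standalone: 1 second of 2-dimensional filtered noise at dt = 1 ms.
process = FilteredNoise(synapse=Alpha(tau=0.1), seed=0)
samples = process.run(1.0, d=2, dt=0.001)  # shape (1000, 2)

# In a model: drive an ensemble with filtered noise.
with nengo.Network() as model:
    noise = nengo.Node(FilteredNoise(synapse=Alpha(tau=0.1)), size_out=1)
    ens = nengo.Ensemble(50, dimensions=1)
    nengo.Connection(noise, ens)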
Example #11
class FilteredNoise(Process):
    """Filtered white noise process.

    This process takes white noise and filters it using the provided synapse.

    Parameters
    ----------
    synapse : Synapse, optional
        The synapse to use to filter the noise. Default: Lowpass(tau=0.005)
    synapse_kwargs : dict, optional
        Arguments to pass to `synapse.make_step`.
    dist : Distribution, optional
        The distribution used to generate the white noise.
        Default: Gaussian(mean=0, std=1)
    scale : bool, optional
        Whether to scale the white noise for integration, making the output
        signal invariant to `dt`. Defaults to True.
    seed : int, optional
        Random number seed. Ensures noise will be the same each run.
    """

    synapse = LinearFilterParam('synapse')
    dist = DistributionParam('dist')
    scale = BoolParam('scale')

    def __init__(self,
                 synapse=Lowpass(tau=0.005),
                 synapse_kwargs={},
                 dist=Gaussian(mean=0, std=1),
                 scale=True,
                 seed=None):
        super(FilteredNoise, self).__init__(seed=seed)
        self.synapse = synapse
        self.synapse_kwargs = synapse_kwargs
        self.dist = dist
        self.scale = scale

    def __repr__(self):
        return "%s(synapse=%r, dist=%r, scale=%r)" % (
            self.__class__.__name__, self.synapse, self.dist, self.scale)

    def make_step(self, size_in, size_out, dt, rng):
        assert size_in == 0

        dist = self.dist
        scale = self.scale
        alpha = 1. / np.sqrt(dt)
        output = np.zeros(size_out)
        filter_step = self.synapse.make_step(dt, output, **self.synapse_kwargs)

        def step_filterednoise(t):
            x = dist.sample(n=1, d=size_out, rng=rng)[0]
            if scale:
                x *= alpha
            filter_step(x)
            return output

        return step_filterednoise
Example #12
class FilteredNoise(Process):
    """Filtered white noise process.

    This process takes white noise and filters it using the provided synapse.

    Parameters
    ----------
    synapse : Synapse, optional
        The synapse to use to filter the noise.
    dist : Distribution, optional
        The distribution used to generate the white noise.
    scale : bool, optional
        Whether to scale the white noise for integration, making the output
        signal invariant to ``dt``.
    seed : int, optional
        Random number seed. Ensures noise will be the same each run.
    """

    synapse = SynapseParam("synapse")
    dist = DistributionParam("dist")
    scale = BoolParam("scale")

    def __init__(
            self,
            synapse=Lowpass(tau=0.005),
            dist=Gaussian(mean=0, std=1),
            scale=True,
            **kwargs,
    ):
        super().__init__(default_size_in=0, **kwargs)
        self.synapse = synapse
        self.dist = dist
        self.scale = scale

    def make_state(self, shape_in, shape_out, dt, dtype=None):
        return self.synapse.make_state(shape_out, shape_out, dt, dtype=dtype)

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0, )
        assert len(shape_out) == 1

        dist = self.dist
        scale = self.scale
        alpha = 1.0 / np.sqrt(dt)
        filter_step = self.synapse.make_step(shape_out, shape_out, dt, rng,
                                             state)

        def step_filterednoise(t):
            x = dist.sample(n=1, d=shape_out[0], rng=rng)[0]
            if scale:
                x *= alpha
            return filter_step(t, x)

        return step_filterednoise
Example #13
class UniformHypersphere(Distribution):
    """Uniform distribution on or in an n-dimensional unit hypersphere.

    Sample points are uniformly distributed across the volume (default) or
    surface of an n-dimensional unit hypersphere.

    Parameters
    ----------
    surface : bool, optional (Default: False)
        Whether sample points should be distributed uniformly
        over the surface of the hypersphere (True),
        or within the hypersphere (False).
    min_magnitude : Number, optional (Default: 0)
        Lower bound on the returned vector magnitudes (such that they are in
        the range ``[min_magnitude, 1]``). Must be in the range [0, 1).
        Ignored if ``surface`` is ``True``.
    """

    surface = BoolParam('surface')
    min_magnitude = NumberParam('min_magnitude', low=0, high=1, high_open=True)

    def __init__(self, surface=False, min_magnitude=0):
        super(UniformHypersphere, self).__init__()
        if surface and min_magnitude > 0:
            warnings.warn("min_magnitude ignored because surface is True")
        self.surface = surface
        self.min_magnitude = min_magnitude

    def __repr__(self):
        args = []
        if self.surface:
            args.append("surface=%s" % self.surface)
        if self.min_magnitude > 0:
            args.append("min_magnitude=%r" % self.min_magnitude)
        return "%s(%s)" % (type(self).__name__, ', '.join(args))

    def sample(self, n, d, rng=np.random):
        if d is None or d < 1:  # check this, since other dists allow d = None
            raise ValidationError("Dimensions must be a positive integer", 'd')

        samples = rng.randn(n, d)
        samples /= npext.norm(samples, axis=1, keepdims=True)

        if self.surface:
            return samples

        # Generate magnitudes for vectors from uniform distribution.
        # The (1 / d) exponent ensures that samples are uniformly distributed
        # in n-space and not all bunched up at the centre of the sphere.
        samples *= rng.uniform(low=self.min_magnitude**d, high=1,
                               size=(n, 1))**(1. / d)

        return samples
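A small sampling sketch using the public nengo.dists class, checking the vector magnitudes for both modes:

import numpy as np
import nengo

rng = np.random.RandomState(0)

surface = nengo.dists.UniformHypersphere(surface=True).sample(1000, d=3, rng=rng)
print(np.allclose(np.linalg.norm(surface, axis=1), 1.0))  # True: all points on the sphere

ball = nengo.dists.UniformHypersphere(min_magnitude=0.5).sample(1000, d=3, rng=rng)
radii = np.linalg.norm(ball, axis=1)
print(radii.min() >= 0.5 and radii.max() <= 1.0)  # True: magnitudes in [0.5, 1]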
Example #14
class GDHL(LearningRuleType):
    """General differential hebbian learning rule.  Modifies connection
    weights according to several components based on presynaptic and
    postsynaptic firing rates and their derivatives.
    """
    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')

    sigma = DictParam("sigma",
                      default={
                          'pp': 0.,
                          'pn': 0.,
                          'np': 0.,
                          'nn': 0.
                      },
                      readonly=True)
    eta = DictParam("eta",
                    default={
                        'sp': 0.,
                        'sn': 0.,
                        'ps': 0.,
                        'ns': 0.
                    },
                    readonly=True)
    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-6)
    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)
    jit = BoolParam("jit", default=True, readonly=True)

    def __init__(self,
                 sigma=Default,
                 eta=Default,
                 learning_rate=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 jit=Default):
        super().__init__(learning_rate, size_in=0)
        self.eta = eta
        self.sigma = sigma
        self.pre_synapse = pre_synapse
        self.post_synapse = (self.pre_synapse
                             if post_synapse is Default else post_synapse)
        self.jit = jit

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs,
                                            self.pre_synapse)
Example #15
def add_spinnaker_params(config):
    """Add SpiNNaker specific parameters to a configuration object."""
    # Add simulator parameters
    config.configures(Simulator)

    config[Simulator].set_param("placer", CallableParameter(default=par.place))
    config[Simulator].set_param("placer_kwargs", DictParam(default={}))

    config[Simulator].set_param("allocater",
                                CallableParameter(default=par.allocate))
    config[Simulator].set_param("allocater_kwargs", DictParam(default={}))

    config[Simulator].set_param("router", CallableParameter(default=par.route))
    config[Simulator].set_param("router_kwargs", DictParam(default={}))

    config[Simulator].set_param("node_io", Parameter(default=Ethernet))
    config[Simulator].set_param("node_io_kwargs", DictParam(default={}))

    # Add function_of_time parameters to Nodes
    config[nengo.Node].set_param("function_of_time", BoolParam(default=False))
    config[nengo.Node].set_param("function_of_time_period",
                                 NumberParam(default=None, optional=True))

    # Add multiple-core options to Nodes
    config[nengo.Node].set_param(
        "n_cores_per_chip",
        IntParam(default=None, low=1, high=16, optional=True))
    config[nengo.Node].set_param("n_chips",
                                 IntParam(default=None, low=1, optional=True))
    # Add optimisation control parameters to (passthrough) Nodes. None means
    # that a heuristic will be used to determine if the passthrough Node should
    # be removed.
    config[nengo.Node].set_param("optimize_out",
                                 BoolParam(default=None, optional=True))

    # Add profiling parameters to Ensembles
    config[nengo.Ensemble].set_param("profile", BoolParam(default=False))
    config[nengo.Ensemble].set_param("profile_num_samples",
                                     NumberParam(default=None, optional=True))
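A usage sketch, assuming the function above and its dependencies are in scope; it shows setting one of the added Node parameters after the config has been extended.

import numpy as np
import nengo

with nengo.Network() as model:
    stim = nengo.Node(lambda t: np.sin(t))
    ens = nengo.Ensemble(100, dimensions=1)
    nengo.Connection(stim, ens)

# Add the SpiNNaker-specific parameters, then mark the stimulus Node as a
# function of time so its output can be precomputed.
add_spinnaker_params(model.config)
model.config[stim].function_of_time = True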
Example #16
class FilteredNoise(Process):
    """Filtered white noise process.

    This process takes white noise and filters it using the provided synapse.

    Parameters
    ----------
    synapse : Synapse, optional
        The synapse to use to filter the noise. Default: Lowpass(tau=0.005)
    synapse_kwargs : dict, optional
        Arguments to pass to `synapse.make_step`.
    dist : Distribution, optional
        The distribution used to generate the white noise.
        Default: Gaussian(mean=0, std=1)
    scale : bool, optional
        Whether to scale the white noise for integration, making the output
        signal invariant to `dt`. Defaults to True.
    """

    synapse = LinearFilterParam()
    dist = DistributionParam()
    scale = BoolParam()

    def __init__(self, synapse=None, synapse_kwargs={}, dist=None, scale=True):
        super(FilteredNoise, self).__init__()
        self.synapse = Lowpass(tau=0.005) if synapse is None else synapse
        self.synapse_kwargs = synapse_kwargs
        self.dist = Gaussian(mean=0, std=1) if dist is None else dist
        self.scale = scale

    def make_step(self, size_in, size_out, dt, rng):
        assert size_in == 0

        dist = self.dist
        scale = self.scale
        alpha = 1. / np.sqrt(dt)
        output = np.zeros(size_out)
        filter_step = self.synapse.make_step(dt, output, **self.synapse_kwargs)

        # separate RNG for simulation for step order independence
        sim_rng = np.random.RandomState(rng.randint(npext.maxint))

        def step(t):
            x = dist.sample(n=1, d=size_out, rng=sim_rng)[0]
            if scale:
                x *= alpha
            filter_step(x)
            return output

        return step
Example #17
class WhiteNoise(Process):
    """Full-spectrum white noise process.

    Parameters
    ----------
    dist : Distribution, optional (Default: ``Gaussian(mean=0, std=1)``)
        The distribution from which to draw samples.
    scale : bool, optional (Default: True)
        Whether to scale the white noise for integration. Integrating white
        noise requires using a time constant of ``sqrt(dt)`` instead of ``dt``
        on the noise term [1]_, to ensure the magnitude of the integrated
        noise does not change with ``dt``.
    seed : int, optional (Default: None)
        Random number seed. Ensures noise will be the same each run.

    References
    ----------
    .. [1] Gillespie, D.T. (1996) Exact numerical simulation of the Ornstein-
       Uhlenbeck process and its integral. Phys. Rev. E 54, pp. 2084-91.
    """

    dist = DistributionParam('dist')
    scale = BoolParam('scale')

    def __init__(self, dist=Gaussian(mean=0, std=1), scale=True, **kwargs):
        super(WhiteNoise, self).__init__(default_size_in=0, **kwargs)
        self.dist = dist
        self.scale = scale

    def __repr__(self):
        return "%s(%r, scale=%r)" % (type(self).__name__, self.dist,
                                     self.scale)

    def make_step(self, shape_in, shape_out, dt, rng):
        assert shape_in == (0, )
        assert len(shape_out) == 1

        dist = self.dist
        scale = self.scale
        alpha = 1. / np.sqrt(dt)

        # ^ need sqrt(dt) when integrating, so divide by sqrt(dt) here,
        #   since dt / sqrt(dt) = sqrt(dt).

        def step_whitenoise(t):
            x = dist.sample(n=1, d=shape_out[0], rng=rng)[0]
            return alpha * x if scale else x

        return step_whitenoise
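A quick sketch of why scale=True matters: the sample standard deviation grows as 1/sqrt(dt), so multiplying it back by sqrt(dt) gives roughly the same value for different time steps (numbers are illustrative).

import numpy as np
from nengo.processes import WhiteNoise

for dt in (0.001, 0.01):
    x = WhiteNoise(scale=True, seed=0).run(10.0, d=1, dt=dt)
    # std(x) is approximately 1/sqrt(dt), so this prints roughly 1.0 for both dt values
    print(dt, np.std(x) * np.sqrt(dt))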
Example #18
class WhiteNoise(Process):
    """Full-spectrum white noise process.

    Parameters
    ----------
    dist : Distribution, optional
        The distribution to draw samples from.
        Default: Gaussian(mean=0, std=1)
    scale : bool, optional
        Whether to scale the white noise for integration. Integrating white
        noise requires using a time constant of `sqrt(dt)` instead of `dt`
        on the noise term [1]_, to ensure the magnitude of the integrated
        noise does not change with `dt`. Defaults to True.
    seed : int, optional
        Random number seed. Ensures noise will be the same each run.

    References
    ----------
    .. [1] Gillespie, D.T. (1996) Exact numerical simulation of the Ornstein-
       Uhlenbeck process and its integral. Phys. Rev. E 54, pp. 2084-91.
    """

    dist = DistributionParam()
    scale = BoolParam()

    def __init__(self, dist=Gaussian(mean=0, std=1), scale=True, seed=None):
        super(WhiteNoise, self).__init__(seed=seed)
        self.dist = dist
        self.scale = scale

    def __repr__(self):
        return "%s(%r, scale=%r)" % (
            self.__class__.__name__, self.dist, self.scale)

    def make_step(self, size_in, size_out, dt, rng):
        assert size_in == 0

        dist = self.dist
        scale = self.scale
        alpha = 1. / np.sqrt(dt)
        # ^ need sqrt(dt) when integrating, so divide by sqrt(dt) here,
        #   since dt / sqrt(dt) = sqrt(dt).
        sim_rng = self.get_sim_rng(rng)

        def step(t):
            x = dist.sample(n=1, d=size_out, rng=sim_rng)[0]
            return alpha * x if scale else x

        return step
Example #19
class WhiteNoise(Process):
    """Full-spectrum white noise process.

    Parameters
    ----------
    dist : Distribution, optional
        The distribution to draw samples from.
        Default: Gaussian(mean=0, std=1)
    scale : bool, optional
        Whether to scale the white noise for integration. Integrating white
        noise requires using a time constant of `sqrt(dt)` instead of `dt`
        on the noise term [1]_, to ensure the magnitude of the integrated
        noise does not change with `dt`. Defaults to True.

    References
    ----------
    .. [1] Gillespie, D.T. (1996) Exact numerical simulation of the Ornstein-
       Uhlenbeck process and its integral. Phys. Rev. E 54, pp. 2084-91.
    """

    dist = DistributionParam()
    scale = BoolParam()

    def __init__(self, dist=None, scale=True):
        super(WhiteNoise, self).__init__()
        self.dist = Gaussian(mean=0, std=1) if dist is None else dist
        self.scale = scale

    def make_step(self, size_in, size_out, dt, rng):
        assert size_in == 0

        dist = self.dist
        scale = self.scale
        alpha = 1. / np.sqrt(dt)
        # ^ need sqrt(dt) when integrating, so divide by sqrt(dt) here,
        #   since dt / sqrt(dt) = sqrt(dt).

        # separate RNG for simulation for step order independence
        sim_rng = np.random.RandomState(rng.randint(npext.maxint))

        def step(t):
            x = dist.sample(n=1, d=size_out, rng=sim_rng)[0]
            return alpha * x if scale else x

        return step
Example #20
class Solver(FrozenObject):
    """Decoder or weight solver.

    A solver can be compositional or non-compositional. Non-compositional
    solvers must operate on the whole neuron-to-neuron weight matrix, while
    compositional solvers operate in the decoded state space, which is then
    combined with transform/encoders to generate the full weight matrix.
    See the solver's ``compositional`` class attribute to determine if it is
    compositional.
    """

    compositional = True

    weights = BoolParam("weights")

    def __init__(self, weights=False):
        super().__init__()
        self.weights = weights

    def __call__(self, A, Y, rng=np.random):
        """Call the solver.

        Parameters
        ----------
        A : (n_eval_points, n_neurons) array_like
            Matrix of the neurons' activities at the evaluation points
        Y : (n_eval_points, dimensions) array_like
            Matrix of the target decoded values for each of the D dimensions,
            at each of the evaluation points.
        rng : `numpy.random.mtrand.RandomState`, optional
            A random number generator to use as required.

        Returns
        -------
        X : (n_neurons, dimensions) or (n_neurons, post.n_neurons) ndarray
            (n_neurons, dimensions) array of decoders (if ``solver.weights``
            is False) or (n_neurons, post.n_neurons) array of weights
            (if ``solver.weights`` is True).
        info : dict
            A dictionary of information about the solver. All dictionaries have
            an ``'rmses'`` key that contains RMS errors of the solve.
            Other keys are unique to particular solvers.
        """
        raise NotImplementedError("Solvers must implement '__call__'")
Example #21
class Nnls(Solver):
    """Non-negative least-squares solver without regularization.

    Similar to `.Lstsq`, except the output values are non-negative.
    """

    weights = BoolParam('weights')

    def __init__(self, weights=False):
        """
        .. note:: Requires
                  `SciPy <http://docs.scipy.org/doc/scipy/reference/>`_.

        Parameters
        ----------
        weights : bool, optional (Default: False)
            If False, solve for decoders. If True, solve for weights.

        Attributes
        ----------
        weights : bool
            If False, solve for decoders. If True, solve for weights.
        """
        import scipy.optimize  # import here too to throw error early
        assert scipy.optimize
        self.weights = weights

    def __call__(self, A, Y, rng=None, E=None):
        import scipy.optimize

        tstart = time.time()
        Y, m, n, d, matrix_in = format_system(A, Y)
        Y = self.mul_encoders(Y, E)

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i])

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'residuals': residuals, 'time': t}
        return X if matrix_in else X.flatten(), info
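The core of this solver is scipy.optimize.nnls, which constrains every coefficient to be nonnegative; a standalone sketch on a toy system (data is random and only illustrative):

import numpy as np
import scipy.optimize

rng = np.random.RandomState(0)
A = rng.rand(50, 10)      # activities: 50 eval points, 10 neurons
y = A.dot(rng.rand(10))   # target that is reachable with nonnegative coefficients

x, residual = scipy.optimize.nnls(A, y)
print(np.all(x >= 0))     # True: the solution is elementwise nonnegative
print(residual)           # close to 0 for this constructed target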
Example #22
class LstsqClassifierParts(nengo.solvers.Solver):
    """Have N independent classifiers, each for part of the neurons, combine.
    """

    reg = NumberParam('reg', low=0)
    weight_power = NumberParam('weight_power', low=0)
    precompute_ai = BoolParam('precompute_ai')

    def __init__(self, weights=False, reg=0.1):
        super(LstsqClassifierParts, self).__init__(weights=weights)
        self.reg = reg
        self.weight_power = 1.
        self.precompute_ai = True

    def __call__(self, A, Y, rng=None, E=None):
        tstart = time.time()

        m, n = A.shape
        _, d = Y.shape
        yi = Y.argmax(axis=1)

        sigma = self.reg * A.max()
        precompute_ai = self.precompute_ai
        weight_power = self.weight_power

        X = np.zeros((n, d))

        blocks = 10
        nb, rb = n // blocks, n % blocks
        nblock = [nb + (1 if i < rb else 0) for i in range(blocks)]
        i, j = 0, 0
        for k in range(blocks):
            i, j = j, j + nblock[k]
            X[i:j] = classifier_weighted_lstsq(
                A[:, i:j], yi, d, sigma,
                weight_power=weight_power, precompute_ai=precompute_ai)

        tend = time.time()
        return self.mul_encoders(X, E), {
            'rmses': npext.rms(np.dot(A, X) - Y, axis=1),
            'time': tend - tstart}
Example #23
class Cholesky(LeastSquaresSolver):
    """Solve a least-squares system using the Cholesky decomposition."""

    transpose = BoolParam("transpose", optional=True)

    def __init__(self, transpose=None):
        super().__init__()
        self.transpose = transpose

    def __call__(self, A, Y, sigma, rng=None):
        m, n = A.shape
        transpose = self.transpose
        if transpose is None:
            # transpose if matrix is fat, but not if sigmas for each neuron
            transpose = m < n and sigma.size == 1

        if transpose:
            # substitution: x = A'*xbar, G*xbar = b where G = A*A' + lambda*I
            G = np.dot(A, A.T)
            b = Y
        else:
            # multiplication by A': G*x = A'*b where G = A'*A + lambda*I
            G = np.dot(A.T, A)
            b = np.dot(A.T, Y)

        # add L2 regularization term 'lambda' = m * sigma**2
        np.fill_diagonal(G, G.diagonal() + m * sigma**2)

        try:
            import scipy.linalg  # pylint: disable=import-outside-toplevel

            factor = scipy.linalg.cho_factor(G, overwrite_a=True)
            X = scipy.linalg.cho_solve(factor, b)
        except ImportError:
            L = np.linalg.cholesky(G)
            L = np.linalg.inv(L.T)
            X = np.dot(L, np.dot(L.T, b))

        X = np.dot(A.T, X) if transpose else X
        info = {"rmses": rmses(A, X, Y)}
        return X, info
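A worked numpy sketch of the non-transposed branch: form G = A'A + m*sigma**2 * I, solve it via the Cholesky factor, and cross-check against solving the regularized normal equations directly (sizes and sigma are arbitrary).

import numpy as np

rng = np.random.RandomState(0)
m, n, d = 100, 20, 2
A = rng.randn(m, n)
Y = rng.randn(m, d)
sigma = 0.1

G = A.T.dot(A)
np.fill_diagonal(G, G.diagonal() + m * sigma ** 2)  # add the L2 term on the diagonal
b = A.T.dot(Y)

L = np.linalg.cholesky(G)
X = np.linalg.solve(L.T, np.linalg.solve(L, b))  # forward then backward substitution

X_direct = np.linalg.solve(A.T.dot(A) + m * sigma ** 2 * np.eye(n), A.T.dot(Y))
print(np.allclose(X, X_direct))  # True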
Example #24
class UniformHypersphere(Distribution):
    """Uniform distribution on or in an n-dimensional unit hypersphere.

    Sample points are uniformly distributed across the volume (default) or
    surface of an n-dimensional unit hypersphere.

    Parameters
    ----------
    surface : bool
        Whether sample points should be distributed uniformly
        over the surface of the hypersphere (True),
        or within the hypersphere (False).
        Default: False

    """
    surface = BoolParam('surface')

    def __init__(self, surface=False):
        super(UniformHypersphere, self).__init__()
        self.surface = surface

    def __repr__(self):
        return "UniformHypersphere(%s)" % ("surface=True"
                                           if self.surface else "")

    def sample(self, n, d, rng=np.random):
        if d is None or d < 1:  # check this, since other dists allow d = None
            raise ValidationError("Dimensions must be a positive integer", 'd')

        samples = rng.randn(n, d)
        samples /= npext.norm(samples, axis=1, keepdims=True)

        if self.surface:
            return samples

        # Generate magnitudes for vectors from uniform distribution.
        # The (1 / d) exponent ensures that samples are uniformly distributed
        # in n-space and not all bunched up at the centre of the sphere.
        samples *= rng.rand(n, 1)**(1.0 / d)

        return samples
Example #25
class Uniform(Distribution):
    """A uniform distribution.

    It's equally likely to get any scalar between ``low`` and ``high``.

    Note that the order of ``low`` and ``high`` doesn't matter;
    if ``low > high`` this will still work, and ``low`` will still
    be a closed interval while ``high`` is open.

    Parameters
    ----------
    low : Number
        The closed lower bound of the uniform distribution; samples >= low
    high : Number
        The open upper bound of the uniform distribution; samples < high
    integer : boolean, optional (Default: False)
        If true, sample from a uniform distribution of integers. In this case,
        low and high should be integers.
    """

    low = NumberParam('low')
    high = NumberParam('high')
    integer = BoolParam('integer')

    def __init__(self, low, high, integer=False):
        super(Uniform, self).__init__()
        self.low = low
        self.high = high
        self.integer = integer

    def __repr__(self):
        return "Uniform(low=%r, high=%r%s)" % (
            self.low, self.high, ", integer=True" if self.integer else "")

    def sample(self, n, d=None, rng=np.random):
        shape = self._sample_shape(n, d)
        if self.integer:
            return rng.randint(low=self.low, high=self.high, size=shape)
        else:
            return rng.uniform(low=self.low, high=self.high, size=shape)
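A brief sketch of both modes using the public nengo.dists class; note that with integer=True the upper bound is excluded because sampling goes through rng.randint:

import numpy as np
import nengo

rng = np.random.RandomState(0)

floats = nengo.dists.Uniform(-1, 1).sample(5, rng=rng)
ints = nengo.dists.Uniform(0, 10, integer=True).sample(5, rng=rng)

print(floats)  # five floats in [-1, 1)
print(ints)    # five integers from {0, ..., 9}; 10 itself is excluded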
Example #26
class ScatteredHypersphere(Distribution):
    r"""Quasirandom distribution over the hypersphere or hyperball.

    Applies a spherical transform to the given quasirandom sequence
    (by default `.QuasirandomSequence`) to obtain uniformly scattered samples.

    This distribution has the nice mathematical property that the discrepancy
    between the empirical distribution and :math:`n` samples is
    :math:`\widetilde{\mathcal{O}} (1 / n)` as opposed to
    :math:`\mathcal{O} (1 / \sqrt{n})` for the Monte Carlo method [1]_.
    This means that the number of samples is effectively squared, making this
    useful as a means for sampling ``eval_points`` and ``encoders``.

    Parameters
    ----------
    surface : bool, optional
        Whether sample points should be distributed uniformly
        over the surface of the hypersphere (True),
        or within the hypersphere (False).
    min_magnitude : Number, optional
        Lower bound on the returned vector magnitudes (such that they are in
        the range ``[min_magnitude, 1]``). Must be in the range [0, 1).
        Ignored if ``surface`` is ``True``.
    base : `.Distribution`, optional
        The base distribution from which to sample quasirandom numbers.
    method : {"sct-approx", "sct", "tfww"}
        Method to use for mapping points to the hypersphere.

        * "sct-approx": Same as "sct", but uses lookup table to approximate the
          beta distribution, making it faster with almost exactly the same result.
        * "sct": Use the exact Spherical Coordinate Transform
          (section 1.5.2 of [1]_).
        * "tfww": Use the Tashiro-Fang-Wang-Wong method (section 4.3 of [1]_).
          Faster than "sct" and "sct-approx", with the same level of uniformity
          for larger numbers of samples (``n >= 4000``, approximately).

    See Also
    --------
    UniformHypersphere
    QuasirandomSequence

    Notes
    -----
    The `.QuasirandomSequence` distribution is mostly deterministic.
    Nondeterminism comes from a random ``d``-dimensional rotation.

    References
    ----------
    .. [1] K.-T. Fang and Y. Wang, Number-Theoretic Methods in Statistics.
       Chapman & Hall, 1994.

    Examples
    --------
    Plot points sampled from the surface of the sphere in 3 dimensions:

    .. testcode::

       from mpl_toolkits.mplot3d import Axes3D

       points = nengo.dists.ScatteredHypersphere(surface=True).sample(1000, d=3)

       ax = plt.subplot(111, projection="3d")
       ax.scatter(*points.T, s=5)

    Plot points sampled from the volume of the sphere in 2 dimensions (i.e. circle):

    .. testcode::

       points = nengo.dists.ScatteredHypersphere(surface=False).sample(1000, d=2)
       plt.scatter(*points.T, s=5)
    """

    surface = BoolParam("surface")
    min_magnitude = NumberParam("min_magnitude", low=0, high=1, high_open=True)
    base = DistributionParam("base")
    method = EnumParam("method", values=("sct-approx", "sct", "tfww"))

    def __init__(
            self,
            surface=False,
            min_magnitude=0,
            base=QuasirandomSequence(),
            method="sct-approx",
    ):
        super().__init__()
        if surface and min_magnitude > 0:
            warnings.warn("min_magnitude ignored because surface is True")
        self.surface = surface
        self.min_magnitude = min_magnitude
        self.base = base
        self.method = method

        if self.method == "sct":
            import scipy.special  # pylint: disable=import-outside-toplevel

            assert scipy.special

    @classmethod
    def spherical_coords_ppf(cls, dims, y, approx=False):
        if not approx:
            import scipy.special  # pylint: disable=import-outside-toplevel

        y_reflect = np.where(y < 0.5, y, 1 - y)
        if approx:
            z_sq = _betaincinv22.lookup(dims, 2 * y_reflect)
        else:
            z_sq = scipy.special.betaincinv(dims / 2.0, 0.5, 2 * y_reflect)
        x = np.arcsin(np.sqrt(z_sq)) / np.pi
        return np.where(y < 0.5, x, 1 - x)

    @classmethod
    def spherical_transform_sct(cls, samples, approx=False):
        """Map samples from the ``[0, 1]``-cube onto the hypersphere.

        Uses the SCT method described in section 1.5.3 of Fang and Wang (1994).
        """
        samples = np.asarray(samples)
        samples = samples[:, np.newaxis] if samples.ndim == 1 else samples
        n, d = samples.shape

        # inverse transform method (section 1.5.2)
        coords = np.empty_like(samples)
        for j in range(d):
            coords[:, j] = cls.spherical_coords_ppf(d - j,
                                                    samples[:, j],
                                                    approx=approx)

        # spherical coordinate transform
        mapped = np.ones((n, d + 1))
        i = np.ones(d)
        i[-1] = 2.0
        s = np.sin(i[np.newaxis, :] * np.pi * coords)
        c = np.cos(i[np.newaxis, :] * np.pi * coords)
        mapped[:, 1:] = np.cumprod(s, axis=1)
        mapped[:, :-1] *= c
        return mapped

    @staticmethod
    def spherical_transform_tfww(c_samples):
        """Map samples from the ``[0, 1]``-cube onto the hypersphere surface.

        Uses the TFWW method described in section 4.3 of Fang and Wang (1994).
        """
        c_samples = np.asarray(c_samples)
        c_samples = c_samples[:,
                              np.newaxis] if c_samples.ndim == 1 else c_samples
        n, s1 = c_samples.shape
        s = s1 + 1

        x_samples = np.zeros((n, s))

        if s == 2:
            phi = 2 * np.pi * c_samples[:, 0]
            x_samples[:, 0] = np.cos(phi)
            x_samples[:, 1] = np.sin(phi)
            return x_samples

        even = s % 2 == 0
        m = s // 2 if even else (s - 1) // 2

        g = np.zeros((n, m + 1))
        g[:, -1] = 1
        for j in range(m - 1, 0, -1):
            exponent = (1.0 / j) if even else (2.0 / (2 * j + 1))
            g[:, j] = g[:, j + 1] * c_samples[:, j - 1] ** exponent

        d = np.sqrt(np.diff(g, axis=1))

        phi = c_samples[:, m - 1:]
        if even:
            phi *= 2 * np.pi
            x_samples[:, 0::2] = d * np.cos(phi)
            x_samples[:, 1::2] = d * np.sin(phi)
        else:
            # there is a mistake in eq. 4.3.7 here, see eq. 1.5.28 for correct version
            phi[:, 1:] *= 2 * np.pi
            f = 2 * d[:, 0] * np.sqrt(phi[:, 0] * (1 - phi[:, 0]))
            x_samples[:, 0] = d[:, 0] * (1 - 2 * phi[:, 0])
            x_samples[:, 1] = f * np.cos(phi[:, 1])
            x_samples[:, 2] = f * np.sin(phi[:, 1])
            if s > 3:
                x_samples[:, 3::2] = d[:, 1:] * np.cos(phi[:, 2:])
                x_samples[:, 4::2] = d[:, 1:] * np.sin(phi[:, 2:])

        return x_samples

    @staticmethod
    def random_orthogonal(d, rng=np.random):
        """Returns a random orthogonal matrix."""
        m = rng.standard_normal((d, d))
        u, _, v = np.linalg.svd(m)
        return np.dot(u, v)

    def sample(self, n, d=1, rng=np.random):
        if d == 1 and self.surface:
            return np.sign(self.base.sample(n, d, rng) - 0.5)

        if d == 1:
            pos_samples = self.base.sample(int(n / 2), d, rng)
            neg_samples = self.base.sample(n - pos_samples.size, d, rng)
            if self.min_magnitude > 0:
                for samples in [pos_samples, neg_samples]:
                    samples *= 1.0 - self.min_magnitude
                    samples += self.min_magnitude
            samples = np.vstack([pos_samples, -1 * neg_samples])
            rng.shuffle(samples)
            return samples

        radius = None
        if self.surface:
            samples = self.base.sample(n, d - 1, rng)
        else:
            samples = self.base.sample(n, d, rng)
            samples, radius = samples[:, :-1], samples[:, -1:]
            if self.min_magnitude != 0:
                min_d = self.min_magnitude**d
                radius *= 1 - min_d
                radius += min_d
            radius **= 1.0 / d

        if self.method == "sct":
            mapped = self.spherical_transform_sct(samples, approx=False)
        elif self.method == "sct-approx":
            mapped = self.spherical_transform_sct(samples, approx=True)
        else:
            assert self.method == "tfww"
            mapped = self.spherical_transform_tfww(samples)

        # radius adjustment for ball
        if radius is not None:
            mapped *= radius

        # random rotation
        rotation = self.random_orthogonal(d, rng=rng)
        return np.dot(mapped, rotation)
Example #27
class BCM2(LearningRuleType):
    """Bienenstock-Cooper-Munroe learning rule.
    Modifies connection weights as a function of the presynaptic activity
    and the difference between the postsynaptic activity and the average
    postsynaptic activity.
    Notes
    -----
    The BCM rule is dependent on pre and post neural activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the BCM rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the BCM rule by ``1 / post.n_neurons``.
    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-9)
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional \
                  (Default: ``nengo.synapses.Lowpass(tau=0.005)``)
        Synapse model used to filter the pre-synaptic activities.
    post_synapse : `.Synapse`, optional (Default: ``None``)
        Synapse model used to filter the post-synaptic activities.
        If None, ``post_synapse`` will be the same as ``pre_synapse``.
    theta_synapse : `.Synapse`, optional \
                    (Default: ``nengo.synapses.Lowpass(tau=1.0)``)
        Synapse model used to filter the theta signal.
    max_weights : float, optional (Default: None)
    
    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    theta_synapse : `.Synapse`
        Synapse model used to filter the theta signal.
    """

    modifies = 'weights'
    probeable = ('theta', 'pre_filtered', 'post_filtered', 'delta')

    learning_rate = NumberParam(
        'learning_rate', low=0, readonly=True, default=1e-9)
    pre_synapse = SynapseParam(
        'pre_synapse', default=Lowpass(tau=0.005), readonly=True)
    post_synapse = SynapseParam(
        'post_synapse', default=None, readonly=True)
    theta_synapse = SynapseParam(
        'theta_synapse', default=Lowpass(tau=1.0), readonly=True)
    max_weight = NumberParam(
        'max_weight', readonly=True, default=None)
    diagonal0 = BoolParam('diagonal0', readonly=True, default=True)

    def __init__(self, learning_rate=Default, pre_synapse=Default,
                 post_synapse=Default, theta_synapse=Default,
                 max_weight=Default, diagonal0=Default,
                 pre_tau=Unconfigurable, post_tau=Unconfigurable,
                 theta_tau=Unconfigurable):
        super().__init__(learning_rate, size_in=0)

        self.max_weight = max_weight
        self.diagonal0 = diagonal0

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

        if post_tau is Unconfigurable:
            self.post_synapse = (self.pre_synapse if post_synapse is Default
                                 else post_synapse)
        else:
            self.post_tau = post_tau

        if theta_tau is Unconfigurable:
            self.theta_synapse = theta_synapse
        else:
            self.theta_tau = theta_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', BCM2.learning_rate.default),
                ('pre_synapse', BCM2.pre_synapse.default),
                ('post_synapse', self.pre_synapse),
                ('theta_synapse', BCM2.theta_synapse.default),
                ('max_weight', BCM2.max_weight.default),
                ('diagonal0', BCM2.diagonal0.default))
Example #28
class Convolution(Transform):
    """An N-dimensional convolutional transform.

    The dimensionality of the convolution is determined by the input shape.

    .. versionadded:: 3.0.0

    Parameters
    ----------
    n_filters : int
        The number of convolutional filters to apply
    input_shape : tuple of int or `.ChannelShape`
        Shape of the input signal to the convolution; e.g.,
        ``(height, width, channels)`` for a 2D convolution with
        ``channels_last=True``.
    kernel_size : tuple of int, optional
        Size of the convolutional kernels (1 element for a 1D convolution,
        2 for a 2D convolution, etc.).
    strides : tuple of int, optional
        Stride of the convolution (1 element for a 1D convolution, 2 for
        a 2D convolution, etc.).
    padding : ``"same"`` or ``"valid"``, optional
        Padding method for input signal. "Valid" means no padding, and
        convolution will only be applied to the fully-overlapping areas of the
        input signal (meaning the output will be smaller). "Same" means that
        the input signal is zero-padded so that the output is the same shape
        as the input.
    channels_last : bool, optional
        If ``True`` (default), the channels are the last dimension in the input
        signal (e.g., a 28x28 image with 3 channels would have shape
        ``(28, 28, 3)``).  ``False`` means that channels are the first
        dimension (e.g., ``(3, 28, 28)``).
    init : `.Distribution` or `~numpy:numpy.ndarray`, optional
        A predefined kernel with shape
        ``kernel_size + (input_channels, n_filters)``, or a ``Distribution``
        that will be used to initialize the kernel.

    Notes
    -----
    As is typical in neural networks, this is technically correlation rather
    than convolution (because the kernel is not flipped).
    """

    n_filters = IntParam("n_filters", low=1)
    input_shape = ChannelShapeParam("input_shape", low=1)
    kernel_size = ShapeParam("kernel_size", low=1)
    strides = ShapeParam("strides", low=1)
    padding = EnumParam("padding", values=("same", "valid"))
    channels_last = BoolParam("channels_last")
    init = DistOrArrayParam("init")

    _param_init_order = ["channels_last", "input_shape"]

    def __init__(
            self,
            n_filters,
            input_shape,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding="valid",
            channels_last=True,
            init=Uniform(-1, 1),
    ):
        super().__init__()

        self.n_filters = n_filters
        self.channels_last = channels_last  # must be set before input_shape
        self.input_shape = input_shape
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding
        self.init = init

        if len(kernel_size) != self.dimensions:
            raise ValidationError(
                "Kernel dimensions (%d) do not match input dimensions (%d)" %
                (len(kernel_size), self.dimensions),
                attr="kernel_size",
            )
        if len(strides) != self.dimensions:
            raise ValidationError(
                "Stride dimensions (%d) do not match input dimensions (%d)" %
                (len(strides), self.dimensions),
                attr="strides",
            )
        if not isinstance(init, Distribution):
            if init.shape != self.kernel_shape:
                raise ValidationError(
                    "Kernel shape %s does not match expected shape %s" %
                    (init.shape, self.kernel_shape),
                    attr="init",
                )

    @property
    def _argreprs(self):
        argreprs = [
            "n_filters=%r" % (self.n_filters, ),
            "input_shape=%s" % (self.input_shape.shape, ),
        ]
        if self.kernel_size != (3, 3):
            argreprs.append("kernel_size=%r" % (self.kernel_size, ))
        if self.strides != (1, 1):
            argreprs.append("strides=%r" % (self.strides, ))
        if self.padding != "valid":
            argreprs.append("padding=%r" % (self.padding, ))
        if self.channels_last is not True:
            argreprs.append("channels_last=%r" % (self.channels_last, ))
        return argreprs

    def sample(self, rng=np.random):
        if isinstance(self.init, Distribution):
            # we sample this way so that any variancescaling distribution based
            # on n/d is scaled appropriately
            kernel = [
                self.init.sample(self.input_shape.n_channels,
                                 self.n_filters,
                                 rng=rng)
                for _ in range(np.prod(self.kernel_size))
            ]
            kernel = np.reshape(kernel, self.kernel_shape)
        else:
            kernel = np.array(self.init, dtype=rc.float_dtype)
        return kernel

    @property
    def kernel_shape(self):
        """Full shape of kernel."""
        return self.kernel_size + (self.input_shape.n_channels, self.n_filters)

    @property
    def size_in(self):
        return self.input_shape.size

    @property
    def size_out(self):
        return self.output_shape.size

    @property
    def dimensions(self):
        """Dimensionality of convolution."""
        return self.input_shape.dimensions

    @property
    def output_shape(self):
        """Output shape after applying convolution to input."""
        output_shape = np.array(self.input_shape.spatial_shape,
                                dtype=rc.float_dtype)
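        # "valid" padding keeps only positions where the kernel fits entirely
        # inside the input; "same" leaves the spatial shape unchanged before
        # striding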
        if self.padding == "valid":
            output_shape -= self.kernel_size
            output_shape += 1
        output_shape /= self.strides
        output_shape = tuple(np.ceil(output_shape).astype(rc.int_dtype))
        output_shape = (output_shape + (self.n_filters, )
                        if self.channels_last
                        else (self.n_filters, ) + output_shape)

        return ChannelShape(output_shape, channels_last=self.channels_last)
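
A minimal usage sketch, assuming the class above is nengo's ``Convolution``
transform (the name ``nengo.Convolution`` and the 28x28 input shape are
illustrative, not taken from the example itself)::

    import nengo

    # 28x28 single-channel input, 4 filters, default 3x3 kernel,
    # "valid" padding and unit strides
    conv = nengo.Convolution(n_filters=4, input_shape=(28, 28, 1))
    print(conv.kernel_shape)        # (3, 3, 1, 4) = kernel_size + (in_channels, n_filters)
    print(conv.output_shape.shape)  # (26, 26, 4): each spatial axis shrinks by kernel_size - 1
    print(conv.size_in, conv.size_out)  # 784 2704 (flattened input/output sizes)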
Example #29
class LinearFilter(Synapse):
    """General linear time-invariant (LTI) system synapse.

    This class can be used to implement any linear filter, given the
    filter's transfer function. [1]_

    Parameters
    ----------
    num : array_like
        Numerator coefficients of transfer function.
    den : array_like
        Denominator coefficients of transfer function.
    analog : boolean, optional (Default: True)
        Whether the synapse coefficients are analog (i.e. continuous-time),
        or discrete. Analog coefficients will be converted to discrete for
        simulation using the simulator ``dt``.

    Attributes
    ----------
    analog : boolean
        Whether the synapse coefficients are analog (i.e. continuous-time),
        or discrete. Analog coefficients will be converted to discrete for
        simulation using the simulator ``dt``.
    den : ndarray
        Denominator coefficients of transfer function.
    num : ndarray
        Numerator coefficients of transfer function.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Filter_%28signal_processing%29
    """

    num = NdarrayParam('num', shape='*')
    den = NdarrayParam('den', shape='*')
    analog = BoolParam('analog')

    def __init__(self, num, den, analog=True, **kwargs):
        super(LinearFilter, self).__init__(**kwargs)
        self.num = num
        self.den = den
        self.analog = analog

    def __repr__(self):
        return "%s(%s, %s, analog=%r)" % (
            type(self).__name__, self.num, self.den, self.analog)

    def evaluate(self, frequencies):
        """Evaluate the transfer function at the given frequencies.

        Examples
        --------

        Using the ``evaluate`` function to make a Bode plot::

            synapse = nengo.synapses.LinearFilter([1], [0.02, 1])
            f = np.logspace(-1, 3, 100)
            y = synapse.evaluate(f)
            plt.subplot(211); plt.semilogx(f, 20*np.log10(np.abs(y)))
            plt.xlabel('frequency [Hz]'); plt.ylabel('magnitude [dB]')
            plt.subplot(212); plt.semilogx(f, np.angle(y))
            plt.xlabel('frequency [Hz]'); plt.ylabel('phase [radians]')
        """
        frequencies = 2.j*np.pi*frequencies
        w = frequencies if self.analog else np.exp(frequencies)
        y = np.polyval(self.num, w) / np.polyval(self.den, w)
        return y

    def make_step(self, shape_in, shape_out, dt, rng, y0=None,
                  dtype=np.float64, method='zoh'):
        """Returns a `.Step` instance that implements the linear filter."""
        assert shape_in == shape_out

        num, den = self.num, self.den
        if self.analog:
            num, den, _ = cont2discrete((num, den), dt, method=method)
            num = num.flatten()

        if den[0] != 1.:
            raise ValidationError("First element of the denominator must be 1",
                                  attr='den', obj=self)
        num = num[1:] if num[0] == 0 else num
        den = den[1:]  # drop first element (equal to 1)
        num, den = num.astype(dtype), den.astype(dtype)

        output = np.zeros(shape_out, dtype=dtype)
        if len(num) == 1 and len(den) == 0:
            return LinearFilter.NoDen(num, den, output)
        elif len(num) == 1 and len(den) == 1:
            return LinearFilter.Simple(num, den, output, y0=y0)
        return LinearFilter.General(num, den, output, y0=y0)

    @staticmethod
    def _make_zero_step(shape_in, shape_out, dt, rng, y0=None,
                        dtype=np.float64):
        output = np.zeros(shape_out, dtype=dtype)
        if y0 is not None:
            output[:] = y0

        return LinearFilter.NoDen(np.array([1.]), np.array([]), output)

    class Step(object):
        """Abstract base class for LTI filtering step functions."""
        def __init__(self, num, den, output):
            self.num = num
            self.den = den
            self.output = output

        def __call__(self, t, signal):
            raise NotImplementedError("Step functions must implement __call__")

    class NoDen(Step):
        """An LTI step function for transfer functions with no denominator.

        This step function should be much faster than the equivalent general
        step function.
        """
        def __init__(self, num, den, output):
            if len(den) > 0:
                raise ValidationError("'den' must be empty (got length %d)"
                                      % len(den), attr='den', obj=self)
            super(LinearFilter.NoDen, self).__init__(num, den, output)
            self.b = num[0]

        def __call__(self, t, signal):
            self.output[...] = self.b * signal
            return self.output

    class Simple(Step):
        """An LTI step function for transfer functions with one num and den.

        This step function should be much faster than the equivalent general
        step function.
        """
        def __init__(self, num, den, output, y0=None):
            if len(num) != 1:
                raise ValidationError("'num' must be length 1 (got %d)"
                                      % len(num), attr='num', obj=self)
            if len(den) != 1:
                raise ValidationError("'den' must be length 1 (got %d)"
                                      % len(den), attr='den', obj=self)

            super(LinearFilter.Simple, self).__init__(num, den, output)
            self.b = num[0]
            self.a = den[0]
            if y0 is not None:
                self.output[...] = y0

        def __call__(self, t, signal):
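            # one-pole difference equation: y[k] = -a * y[k-1] + b * x[k]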
            self.output *= -self.a
            self.output += self.b * signal
            return self.output

    class General(Step):
        """An LTI step function for any given transfer function.

        Implements a discrete-time LTI system using the difference equation
        [1]_ for the given transfer function (num, den).

        References
        ----------
        .. [1] http://en.wikipedia.org/wiki/Digital_filter#Difference_equation
        """
        def __init__(self, num, den, output, y0=None):
            super(LinearFilter.General, self).__init__(num, den, output)
            self.x = collections.deque(maxlen=len(num))
            self.y = collections.deque(maxlen=len(den))
            if y0 is not None:
                self.output[...] = y0
                for _ in num:
                    self.x.appendleft(np.array(self.output))
                for _ in den:
                    self.y.appendleft(np.array(self.output))

        def __call__(self, t, signal):
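            # direct-form difference equation:
            # y[k] = sum_i num[i] * x[k-i] - sum_j den[j] * y[k-1-j]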
            self.output[...] = 0

            self.x.appendleft(np.array(signal))
            for k, xk in enumerate(self.x):
                self.output += self.num[k] * xk
            for k, yk in enumerate(self.y):
                self.output -= self.den[k] * yk
            self.y.appendleft(np.array(self.output))

            return self.output
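
A minimal sketch of driving ``make_step`` directly, assuming the class above is
nengo's ``LinearFilter`` with the signature shown (the time constant and ``dt``
are illustrative)::

    import numpy as np
    import nengo

    # first-order lowpass 1 / (0.02 s + 1), discretized at dt = 1 ms
    synapse = nengo.LinearFilter([1], [0.02, 1], analog=True)
    step = synapse.make_step(shape_in=(1,), shape_out=(1,), dt=0.001, rng=np.random)

    x = np.ones(1)
    for i in range(3):
        print(step(i * 0.001, x))  # output rises toward 1 with time constant 0.02 s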
Example #30
class TensorNode(Node):
    """
    Inserts TensorFlow code into a Nengo model.

    Parameters
    ----------
    tensor_func : callable
        A function that maps node inputs to outputs.
    shape_in : tuple of int
        Shape of TensorNode input signal (not including batch dimension).
    shape_out : tuple of int
        Shape of TensorNode output signal (not including batch dimension).
        If None, value will be inferred by calling ``tensor_func``.
    pass_time : bool
        If True, pass current simulation time to TensorNode function (in addition
        to the standard input).
    label : str (Default: None)
        A name for the node, used for debugging and visualization.
    """

    tensor_func = TensorFuncParam("tensor_func")
    shape_in = ShapeParam("shape_in", default=None, low=1, optional=True)
    shape_out = ShapeParam("shape_out", default=None, low=1, optional=True)
    pass_time = BoolParam("pass_time", default=True)

    def __init__(
        self,
        tensor_func,
        shape_in=Default,
        shape_out=Default,
        pass_time=Default,
        label=Default,
    ):
        # pylint: disable=non-parent-init-called,super-init-not-called
        # note: we bypass the Node constructor, because we don't want to
        # perform validation on `output`
        NengoObject.__init__(self, label=label, seed=None)

        self.shape_in = shape_in
        self.shape_out = shape_out
        self.pass_time = pass_time

        if not (self.shape_in or self.pass_time):
            raise ValidationError(
                "Must specify either shape_in or pass_time", "TensorNode"
            )

        self.tensor_func = tensor_func

    @property
    def output(self):
        """
        Ensures that nothing tries to evaluate the `output` attribute
        (indicating that something is trying to simulate this as a regular
        `nengo.Node` rather than a TensorNode).
        """

        def output_func(*_):
            raise SimulationError(
                "Cannot call TensorNode output function (this probably means "
                "you are trying to use a TensorNode inside a Simulator other "
                "than NengoDL)"
            )

        return output_func

    @property
    def size_in(self):
        """Number of input elements (flattened)."""

        return 0 if self.shape_in is None else np.prod(self.shape_in)

    @property
    def size_out(self):
        """Number of output elements (flattened)."""

        return 0 if self.shape_out is None else np.prod(self.shape_out)
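
A minimal sketch, assuming this is nengo_dl's ``TensorNode`` (the lambda and
shapes are illustrative); the node wraps a TensorFlow op and must be run with
the NengoDL simulator::

    import numpy as np
    import nengo
    import nengo_dl
    import tensorflow as tf

    with nengo.Network() as net:
        stim = nengo.Node(np.sin)
        # with pass_time=True (the default) and shape_in given, the function
        # receives (t, x) and must return a tensor matching shape_out
        tnode = nengo_dl.TensorNode(
            lambda t, x: tf.nn.relu(x), shape_in=(1,), shape_out=(1,)
        )
        nengo.Connection(stim, tnode)
        probe = nengo.Probe(tnode)

    with nengo_dl.Simulator(net) as sim:
        sim.run_steps(10)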