Example #1
class RMSP(LearningRuleType):
    """Hebbian synaptic plasticity learning rule.  Modifies connection weights
    according to the presynaptic and postsynaptic firing rates and the
    target firing rate.

    """
    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')
    
    learning_rate = NumberParam("learning_rate", low=0, readonly=True, default=1e-6)
    pre_synapse = SynapseParam("pre_synapse", default=Lowpass(tau=0.005), readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)

    jit = BoolParam("jit", default=True, readonly=True)

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 jit=Default):
        super().__init__(learning_rate, size_in=1)
        self.pre_synapse = pre_synapse
        self.post_synapse = (
            self.pre_synapse if post_synapse is Default else post_synapse
        )
        self.jit = jit

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs, self.pre_synapse)
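The `_argreprs` property above calls a helper that is not part of this snippet. A minimal sketch, assuming it behaves like the nengo helper of the same name:

def _remove_default_post_synapse(argreprs, default):
    # Drop the trailing "post_synapse=..." entry from the repr when it
    # merely restates the pre_synapse value it was copied from.
    default_post_synapse = f"post_synapse={default!r}"
    if argreprs[-1] == default_post_synapse:
        argreprs = argreprs[:-1]
    return argreprs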
Example #2
class FilteredNoise(Process):
    """Filtered white noise process.

    This process takes white noise and filters it using the provided synapse.

    Parameters
    ----------
    synapse : Synapse, optional (Default: ``Lowpass(tau=0.005)``)
        The synapse to use to filter the noise.
    dist : Distribution, optional (Default: ``Gaussian(mean=0, std=1)``)
        The distribution used to generate the white noise.
    scale : bool, optional (Default: True)
        Whether to scale the white noise for integration, making the output
        signal invariant to ``dt``.
    synapse_kwargs : dict, optional (Default: None)
        Arguments to pass to ``synapse.make_step``.
    seed : int, optional (Default: None)
        Random number seed. Ensures noise will be the same each run.
    """

    synapse = SynapseParam('synapse')
    dist = DistributionParam('dist')
    scale = BoolParam('scale')
    synapse_kwargs = DictParam('synapse_kwargs')

    def __init__(self,
                 synapse=Lowpass(tau=0.005),
                 dist=Gaussian(mean=0, std=1),
                 scale=True,
                 synapse_kwargs=None,
                 **kwargs):
        super(FilteredNoise, self).__init__(default_size_in=0, **kwargs)
        self.synapse = synapse
        self.synapse_kwargs = {} if synapse_kwargs is None else synapse_kwargs
        self.dist = dist
        self.scale = scale

    def __repr__(self):
        return "%s(synapse=%r, dist=%r, scale=%r)" % (
            type(self).__name__, self.synapse, self.dist, self.scale)

    def make_step(self, shape_in, shape_out, dt, rng):
        assert shape_in == (0, )
        assert len(shape_out) == 1

        dist = self.dist
        scale = self.scale
        alpha = 1. / np.sqrt(dt)
        filter_step = self.synapse.make_step(shape_out, shape_out, dt, None,
                                             **self.synapse_kwargs)

        def step_filterednoise(t):
            x = dist.sample(n=1, d=shape_out[0], rng=rng)[0]
            if scale:
                x *= alpha
            return filter_step(t, x)

        return step_filterednoise
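A minimal usage sketch (assuming the nengo imports used above): a Process can drive a Node directly, so the filtered noise can be probed like any other signal.

import nengo

with nengo.Network() as model:
    noise = nengo.Node(FilteredNoise(synapse=Lowpass(tau=0.01), seed=1))
    p = nengo.Probe(noise)

with nengo.Simulator(model) as sim:
    sim.run(0.1)  # sim.data[p] has shape (n_steps, 1)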
Example #3
class Voja(LearningRuleType):
    """Vector Oja learning rule.

    Modifies an ensemble's encoders to be selective to its inputs.

    A connection to the learning rule will provide a scalar weight for the
    learning rate, minus 1. For instance, 0 is normal learning, -1 is no
    learning, and less than -1 causes anti-learning or "forgetting".

    Parameters
    ----------
    learning_rate : float, optional
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`, optional
        Synapse model used to filter the post-synaptic activities.
    post_tau : float, optional
        Deprecated; use ``post_synapse`` instead. Filter constant on
        activities of neurons in post population.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which encoders will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    """

    modifies = "encoders"
    probeable = ("post_filtered", "scaled_encoders", "delta")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-2)
    post_synapse = SynapseParam("post_synapse",
                                default=Lowpass(tau=0.005),
                                readonly=True)

    post_tau = _deprecated_tau("post_tau", "post_synapse")

    def __init__(self,
                 learning_rate=Default,
                 post_synapse=Default,
                 post_tau=Unconfigurable):
        super().__init__(learning_rate, size_in=1)

        if post_tau is Unconfigurable:
            self.post_synapse = post_synapse
        else:
            self.post_tau = post_tau

    @property
    def _argdefaults(self):
        return (
            ("learning_rate", Voja.learning_rate.default),
            ("post_synapse", Voja.post_synapse.default),
        )
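A minimal usage sketch (assuming standard nengo imports): Voja is attached to a connection into an ensemble, and an optional scalar input to the rule gates learning, as described in the docstring.

import nengo

with nengo.Network() as model:
    stim = nengo.Node([1])
    ens = nengo.Ensemble(50, dimensions=1)
    conn = nengo.Connection(stim, ens, learning_rule_type=Voja())

    # Scalar control input: 0 -> normal learning, -1 -> learning off.
    control = nengo.Node([0])
    nengo.Connection(control, conn.learning_rule, synapse=None)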
Example #4
class mPES(LearningRuleType):
    """Memristor-based PES learning rule.

    Modifies connection weights by driving pairs of simulated memristors
    (``pos_memristors`` and ``neg_memristors``) with the PES error signal.
    """

    modifies = "weights"
    probeable = ("error", "activities", "delta", "pos_memristors",
                 "neg_memristors")

    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    r_max = NumberParam("r_max", readonly=True, default=2.3e8)
    r_min = NumberParam("r_min", readonly=True, default=200)
    exponent = NumberParam("exponent", readonly=True, default=-0.146)
    gain = NumberParam("gain", readonly=True, default=1e3)
    voltage = NumberParam("voltage", readonly=True, default=1e-1)
    initial_state = DictParam("initial_state", optional=True)

    def __init__(self,
                 pre_synapse=Default,
                 r_max=Default,
                 r_min=Default,
                 exponent=Default,
                 noisy=False,
                 gain=Default,
                 voltage=Default,
                 initial_state=None,
                 seed=None):
        super().__init__(size_in="post_state")

        self.pre_synapse = pre_synapse
        self.r_max = r_max
        self.r_min = r_min
        self.exponent = exponent
        if not noisy:
            self.noise_percentage = np.zeros(4)
        elif isinstance(noisy, (float, int)):
            self.noise_percentage = np.full(4, noisy)
        elif isinstance(noisy, list) and len(noisy) == 4:
            self.noise_percentage = noisy
        else:
            raise ValueError(
                f"noisy parameter must be a float, an int, or a list of "
                f"length 4, not {type(noisy)}"
            )
        self.gain = gain
        self.voltage = voltage
        self.seed = seed
        self.initial_state = {} if initial_state is None else initial_state

    @property
    def _argdefaults(self):
        return (
            ("pre_synapse", mPES.pre_synapse.default),
            ("r_max", mPES.r_max.default),
            ("r_min", mPES.r_min.default),
            ("exponent", mPES.exponent.default),
        )
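The ``noisy`` argument accepts three forms; a quick sketch of the resulting ``noise_percentage`` (assuming ``numpy`` as ``np``):

mPES().noise_percentage                             # np.zeros(4): no noise
mPES(noisy=0.15).noise_percentage                   # np.full(4, 0.15)
mPES(noisy=[0.1, 0.1, 0.2, 0.2]).noise_percentage   # the list, used as-is
mPES(noisy="yes")                                   # raises ValueError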
Example #5
class FilteredNoise(Process):
    """Filtered white noise process.

    This process takes white noise and filters it using the provided synapse.

    Parameters
    ----------
    synapse : Synapse, optional
        The synapse to use to filter the noise.
    dist : Distribution, optional
        The distribution used to generate the white noise.
    scale : bool, optional
        Whether to scale the white noise for integration, making the output
        signal invariant to ``dt``.
    seed : int, optional
        Random number seed. Ensures noise will be the same each run.
    """

    synapse = SynapseParam("synapse")
    dist = DistributionParam("dist")
    scale = BoolParam("scale")

    def __init__(
            self,
            synapse=Lowpass(tau=0.005),
            dist=Gaussian(mean=0, std=1),
            scale=True,
            **kwargs,
    ):
        super().__init__(default_size_in=0, **kwargs)
        self.synapse = synapse
        self.dist = dist
        self.scale = scale

    def make_state(self, shape_in, shape_out, dt, dtype=None):
        return self.synapse.make_state(shape_out, shape_out, dt, dtype=dtype)

    def make_step(self, shape_in, shape_out, dt, rng, state):
        assert shape_in == (0, )
        assert len(shape_out) == 1

        dist = self.dist
        scale = self.scale
        alpha = 1.0 / np.sqrt(dt)
        filter_step = self.synapse.make_step(shape_out, shape_out, dt, rng,
                                             state)

        def step_filterednoise(t):
            x = dist.sample(n=1, d=shape_out[0], rng=rng)[0]
            if scale:
                x *= alpha
            return filter_step(t, x)

        return step_filterednoise
Example #6
class PES(LearningRuleType):
    """Prescribed Error Sensitivity learning rule.

    Modifies a connection's decoders to minimize an error signal provided
    through a connection to the connection's learning rule.

    Parameters
    ----------
    learning_rate : float, optional
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional
        Synapse model used to filter the pre-synaptic activities.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = "decoders"
    probeable = ("error", "activities", "delta")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-4)
    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)

    pre_tau = _deprecated_tau("pre_tau", "pre_synapse")

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 pre_tau=Unconfigurable):
        super().__init__(learning_rate, size_in="post_state")
        if learning_rate is not Default and learning_rate >= 1.0:
            warnings.warn("This learning rate is very high, and can result "
                          "in floating point errors from too much current.")

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

    @property
    def _argdefaults(self):
        return (
            ("learning_rate", PES.learning_rate.default),
            ("pre_synapse", PES.pre_synapse.default),
        )
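A minimal usage sketch (assuming standard nengo imports): PES needs an error signal connected into the learning rule; here it learns a communication channel starting from the zero function.

import nengo

with nengo.Network() as model:
    pre = nengo.Ensemble(60, dimensions=1)
    post = nengo.Ensemble(60, dimensions=1)
    error = nengo.Ensemble(60, dimensions=1)

    conn = nengo.Connection(pre, post, function=lambda x: [0],
                            learning_rule_type=PES(learning_rate=1e-4))

    # error = actual - target; PES adjusts decoders to minimize it.
    nengo.Connection(post, error)
    nengo.Connection(pre, error, transform=-1)
    nengo.Connection(error, conn.learning_rule)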
Example #7
class mPES(LearningRuleType):
    """Memristor-based PES learning rule (simplified variant).

    Like the fuller ``mPES`` above, but with a single scalar noise
    setting and without ``voltage`` or ``initial_state`` parameters.
    """

    modifies = "weights"
    probeable = ("error", "activities", "delta", "pos_memristors",
                 "neg_memristors")

    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    r_max = NumberParam("r_max", readonly=True, default=2.3e8)
    r_min = NumberParam("r_min", readonly=True, default=200)
    exponent = NumberParam("exponent", readonly=True, default=-0.146)
    gain = NumberParam("gain", readonly=True, default=1e3)

    def __init__(self,
                 pre_synapse=Default,
                 r_max=Default,
                 r_min=Default,
                 exponent=Default,
                 noisy=False,
                 gain=Default,
                 seed=None):
        super().__init__(size_in="post_state")

        self.pre_synapse = pre_synapse
        self.r_max = r_max
        self.r_min = r_min
        self.exponent = exponent
        self.noise_percentage = 0 if not noisy else noisy
        self.gain = gain
        self.seed = seed

    @property
    def _argdefaults(self):
        return (
            ("learning_rate", mPES.learning_rate.default),
            ("pre_synapse", mPES.pre_synapse.default),
            ("r_max", mPES.r_max.default),
            ("r_min", mPES.r_min.default),
            ("exponent", mPES.exponent.default),
        )
Example #8
class Probe(NengoObject):
    """A probe is an object that collects data from the simulation.

    This is to be used in any situation where you wish to gather simulation
    data (spike data, represented values, neuron voltages, etc.) for analysis.

    Probes do not directly affect the simulation.

    All Nengo objects can be probed (except Probes themselves).
    Each object has different attributes that can be probed.
    To see what is probeable for each object, print its
    ``probeable`` attribute.

    .. testcode::

       with nengo.Network():
           ens = nengo.Ensemble(10, 1)
       print(ens.probeable)

    .. testoutput::

       ('decoded_output', 'input', 'scaled_encoders')

    Parameters
    ----------
    target : Ensemble, Neurons, Node, or Connection
        The object to probe.
    attr : str, optional
        The signal to probe. Refer to the target's ``probeable`` list for
        details. If None, the first element in the ``probeable`` list
        will be used.
    sample_every : float, optional
        Sampling period in seconds. If None, the ``dt`` of the simulation
        will be used.
    synapse : Synapse, optional
        A synaptic model to filter the probed signal.
    solver : Solver, optional
        `~nengo.solvers.Solver` to compute decoders
        for probes that require them.
    label : str, optional
        A name for the probe. Used for debugging and visualization.
    seed : int, optional
        The seed used for random number generation.

    Attributes
    ----------
    attr : str or None
        The signal that will be probed. If None, the first element of the
        target's ``probeable`` list will be used.
    sample_every : float or None
        Sampling period in seconds. If None, the ``dt`` of the simulation
        will be used.
    solver : Solver or None
        `~nengo.solvers.Solver` to compute decoders. Only used for probes
        of an ensemble's decoded output.
    synapse : Synapse or None
        A synaptic model to filter the probed signal.
    target : Ensemble, Neurons, Node, or Connection
        The object to probe.
    """

    target = TargetParam("target", nonzero_size_out=True)
    attr = AttributeParam("attr", default=None, optional=True)
    sample_every = NumberParam("sample_every",
                               default=None,
                               optional=True,
                               low=1e-10)
    synapse = SynapseParam("synapse", default=None, optional=True)
    solver = ProbeSolverParam("solver", default=ConnectionDefault)

    _param_init_order = ["target"]

    def __init__(
        self,
        target,
        attr=None,
        sample_every=Default,
        synapse=Default,
        solver=Default,
        label=Default,
        seed=Default,
    ):
        super().__init__(label=label, seed=seed)
        self.target = target
        self.attr = attr if attr is not None else self.obj.probeable[0]
        self.sample_every = sample_every
        self.synapse = synapse
        self.solver = solver

    def __repr__(self):
        label_txt = "" if self.label is None else f' "{self.label}"'
        return f"<Probe{label_txt} at 0x{id(self):x} of '{self.attr}' of {self.target}>"

    def __str__(self):
        label_txt = "" if self.label is None else f' "{self.label}"'
        return f"<Probe{label_txt} of '{self.attr}' of {self.target}>"

    @property
    def obj(self):
        """(Nengo object) The underlying Nengo object target."""
        return self.target.obj if isinstance(self.target,
                                             ObjView) else self.target

    @property
    def size_in(self):
        """(int) Dimensionality of the probed signal."""
        return self.target.size_out

    @property
    def size_out(self):
        """(int) Cannot connect from probes, so always 0."""
        return 0

    @property
    def slice(self):
        """(slice) The slice associated with the Nengo object target."""
        return self.target.slice if isinstance(self.target, ObjView) else None
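A minimal usage sketch (assuming standard nengo imports):

import nengo

with nengo.Network() as model:
    ens = nengo.Ensemble(10, dimensions=1)
    # attr=None falls back to the first probeable attribute,
    # 'decoded_output'; the synapse filters the probed signal.
    p = nengo.Probe(ens, synapse=0.01)
    p_spikes = nengo.Probe(ens.neurons)  # spike trains via .neurons

with nengo.Simulator(model) as sim:
    sim.run(0.5)
data = sim.data[p]  # shape: (n_steps, 1)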
Example #9
class Connection(NengoObject):
    """Connects two objects together.

    The connection between the two object is unidirectional,
    transmitting information from the first argument, ``pre``,
    to the second argument, ``post``.

    Almost any Nengo object can act as the pre or post side of a connection.
    Additionally, you can use Python slice syntax to access only some of the
    dimensions of the pre or post object.

    For example, if ``node`` has ``size_out=2`` and ``ensemble`` has
    ``size_in=1``, we could not create the following connection::

        nengo.Connection(node, ensemble)

    But, we could create either of these two connections::

        nengo.Connection(node[0], ensemble)
        nengo.Connection(node[1], ensemble)

    Parameters
    ----------
    pre : Ensemble or Neurons or Node
        The source Nengo object for the connection.
    post : Ensemble or Neurons or Node or Probe
        The destination object for the connection.
    synapse : Synapse, optional \
              (Default: ``nengo.synapses.Lowpass(tau=0.005)``)
        Synapse model to use for filtering (see `~nengo.synapses.Synapse`).
    function : callable, optional (Default: None)
        Function to compute across the connection. Note that ``pre`` must be
        an ensemble to apply a function across the connection.
    transform : (post.size_in, pre.size_out) array_like, optional \
                (Default: ``np.array(1.0)``)
        Linear transform mapping the pre output to the post input.
        This transform is in terms of the sliced size; if either pre
        or post is a slice, the transform must be shaped according to
        the sliced dimensionality. Additionally, the function is applied
        before the transform, so if a function is computed across the
        connection, the transform must be of shape
        ``(post.size_in, len(function(np.zeros(pre.size_out))))``.
    solver : Solver, optional (Default: ``nengo.solvers.LstsqL2()``)
        Solver instance to compute decoders or weights
        (see `~nengo.solvers.Solver`). If ``solver.weights`` is True, a full
        connection weight matrix is computed instead of decoders.
    learning_rule_type : LearningRuleType or iterable of LearningRuleType, \
                         optional (Default: None)
        Modifies the decoders or connection weights during simulation.
    eval_points : (n_eval_points, pre.size_out) array_like or int, optional \
                  (Default: None)
        Points at which to evaluate ``function`` when computing decoders,
        spanning the interval (-pre.radius, pre.radius) in each dimension.
        If None, will use the eval_points associated with ``pre``.
    scale_eval_points : bool, optional (Default: True)
        Indicates whether the evaluation points should be scaled
        by the radius of the pre Ensemble.
    label : str, optional (Default: None)
        A descriptive label for the connection.
    seed : int, optional (Default: None)
        The seed used for random number generation.

    Attributes
    ----------
    is_decoded : bool
        True if and only if the connection is decoded. This will not occur
        when ``solver.weights`` is True or both pre and post are
        `~nengo.ensemble.Neurons`.
    function : callable
        The given function.
    function_size : int
        The output dimensionality of the given function. If no function is
        specified, function_size will be 0.
    label : str
        A human-readable connection label for debugging and visualization.
        If not overridden, incorporates the labels of the pre and post objects.
    learning_rule_type : instance or list or dict of LearningRuleType, optional
        The learning rule types.
    post : Ensemble or Neurons or Node or Probe or ObjView
        The given post object.
    post_obj : Ensemble or Neurons or Node or Probe
        The underlying post object, even if ``post`` is an ``ObjView``.
    post_slice : slice or list or None
        The slice associated with ``post`` if it is an ObjView, or None.
    pre : Ensemble or Neurons or Node or ObjView
        The given pre object.
    pre_obj : Ensemble or Neurons or Node
        The underlying pre object, even if ``pre`` is an ``ObjView``.
    pre_slice : slice or list or None
        The slice associated with ``pre`` if it is an ObjView, or None.
    seed : int
        The seed used for random number generation.
    solver : Solver
        The Solver instance that will be used to compute decoders or weights
        (see ``nengo.solvers``).
    synapse : Synapse
        The Synapse model used for filtering across the connection
        (see ``nengo.synapses``).
    transform : (size_mid, size_out) array_like
        Linear transform mapping the pre function output to the post input.
    """

    probeable = ('output', 'input', 'weights')

    pre = PrePostParam('pre', nonzero_size_out=True)
    post = PrePostParam('post', nonzero_size_in=True)
    synapse = SynapseParam('synapse', default=Lowpass(tau=0.005))
    function_info = ConnectionFunctionParam('function',
                                            default=None,
                                            optional=True)
    transform = TransformParam('transform', default=np.array(1.0))
    solver = ConnectionSolverParam('solver', default=LstsqL2())
    learning_rule_type = ConnectionLearningRuleTypeParam('learning_rule_type',
                                                         default=None,
                                                         optional=True)
    eval_points = EvalPointsParam('eval_points',
                                  default=None,
                                  optional=True,
                                  sample_shape=('*', 'size_in'))
    scale_eval_points = BoolParam('scale_eval_points', default=True)
    modulatory = ObsoleteParam(
        'modulatory', "Modulatory connections have been removed. "
        "Connect to a learning rule instead.",
        since="v2.1.0",
        url="https://github.com/nengo/nengo/issues/632#issuecomment-71663849")

    def __init__(self,
                 pre,
                 post,
                 synapse=Default,
                 function=Default,
                 transform=Default,
                 solver=Default,
                 learning_rule_type=Default,
                 eval_points=Default,
                 scale_eval_points=Default,
                 label=Default,
                 seed=Default,
                 modulatory=Unconfigurable):
        super(Connection, self).__init__(label=label, seed=seed)

        self.pre = pre
        self.post = post

        self.synapse = synapse
        self.transform = transform
        self.scale_eval_points = scale_eval_points
        self.eval_points = eval_points  # Must be set before function
        self.function_info = function  # Must be set after transform
        self.solver = solver  # Must be set before learning rule
        self.learning_rule_type = learning_rule_type  # set after transform
        self.modulatory = modulatory
        self._learning_rule = None  # built lazily by the learning_rule property

    def __str__(self):
        return "<Connection %s>" % self._str

    def __repr__(self):
        return "<Connection at 0x%x %s>" % (id(self), self._str)

    @property
    def _str(self):
        if self.label is not None:
            return self.label

        desc = "" if self.function is None else " computing '%s'" % (getattr(
            self.function, '__name__', str(self.function)))
        return "from %s to %s%s" % (self.pre, self.post, desc)

    @property
    def function(self):
        return self.function_info.function

    @function.setter
    def function(self, function):
        self.function_info = function

    @property
    def is_decoded(self):
        return not (self.solver.weights or
                    (isinstance(self.pre_obj, Neurons)
                     and isinstance(self.post_obj, Neurons)))

    @property
    def _label(self):
        if self.label is not None:
            return self.label

        return "from %s to %s%s" % (self.pre, self.post,
                                    " computing '%s'" % self.function.__name__
                                    if self.function is not None else "")

    @property
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is not None and self._learning_rule is None:
            types = self.learning_rule_type
            if isinstance(types, dict):
                self._learning_rule = types.__class__()  # dict of same type
                for k, v in iteritems(types):
                    self._learning_rule[k] = LearningRule(self, v)
            elif is_iterable(types):
                self._learning_rule = [LearningRule(self, v) for v in types]
            elif isinstance(types, LearningRuleType):
                self._learning_rule = LearningRule(self, types)
            else:
                raise ValidationError("Invalid type %r" %
                                      types.__class__.__name__,
                                      attr='learning_rule_type',
                                      obj=self)

        return self._learning_rule

    @property
    def post_obj(self):
        return self.post.obj if isinstance(self.post, ObjView) else self.post

    @property
    def post_slice(self):
        return (self.post.slice
                if isinstance(self.post, ObjView) else slice(None))

    @property
    def pre_obj(self):
        return self.pre.obj if isinstance(self.pre, ObjView) else self.pre

    @property
    def pre_slice(self):
        return self.pre.slice if isinstance(self.pre, ObjView) else slice(None)

    @property
    def size_in(self):
        """(int) The number of output dimensions of the pre object.

        Also the input size of the function, if one is specified.
        """
        return self.pre.size_out

    @property
    def size_mid(self):
        """(int) The number of output dimensions of the function, if specified.

        If the function is not specified, then ``size_in == size_mid``.
        """
        size = self.function_info.size
        return self.size_in if size is None else size

    @property
    def size_out(self):
        """(int) The number of input dimensions of the post object.

        Also the number of output dimensions of the transform.
        """
        return self.post.size_in
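A minimal usage sketch (assuming standard nengo imports) of the function-before-transform rule described above: the transform must map the function's output dimensionality to the post input.

import nengo
import numpy as np

with nengo.Network() as model:
    a = nengo.Ensemble(40, dimensions=2)
    b = nengo.Ensemble(40, dimensions=1)
    # The function output is 1-D and b is 1-D, so the transform is (1, 1).
    nengo.Connection(a, b, function=lambda x: x[0] * x[1],
                     transform=np.array([[2.0]]),
                     synapse=nengo.synapses.Lowpass(tau=0.01))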
Example #10
class EchoState(Network, Reservoir):
    """An Echo State Network (ESN) within a Nengo Reservoir.

    This creates a standard Echo State Network (ESN) as a Nengo network,
    defaulting to the standard set of assumptions of non-spiking Tanh units
    and a random recurrent weight matrix [1]_. This is based on the
    minimalist Python implementation from [2]_.

    The network takes some arbitrary time-varying vector as input, encodes it
    randomly, and filters it using nonlinear units and a random recurrent
    weight matrix normalized by its spectral radius.

    This class also inherits ``nengolib.networks.Reservoir``, and thus the
    optimal linear readout is solved for in the same way: the network is
    simulated on a test signal, and then a solver is used to optimize the
    decoding connection weights.

    References
    ----------
    .. [1] http://www.scholarpedia.org/article/Echo_state_network
    .. [2] http://minds.jacobs-university.de/mantas/code
    """

    n_neurons = IntParam('n_neurons', default=None, low=1)
    dimensions = IntParam('dimensions', default=None, low=1)
    dt = NumberParam('dt', low=0, low_open=True)
    recurrent_synapse = SynapseParam('recurrent_synapse')
    gain = NumberParam('gain', low=0, low_open=True)
    neuron_type = NeuronTypeParam('neuron_type')

    def __init__(self,
                 n_neurons,
                 dimensions,
                 recurrent_synapse=0.005,
                 readout_synapse=None,
                 radii=1.0,
                 gain=1.25,
                 rng=None,
                 neuron_type=Tanh(),
                 include_bias=True,
                 ens_seed=None,
                 label=None,
                 seed=None,
                 add_to_container=None,
                 **ens_kwargs):
        """Initializes the Echo State Network.

        Parameters
        ----------
        n_neurons : int
            The number of neurons to use in the reservoir.
        dimensions : int
            The dimensionality of the input signal.
        recurrent_synapse : nengo.synapses.Synapse (Default: ``0.005``)
            Synapse used to filter the recurrent connection.
        readout_synapse : nengo.synapses.Synapse (Default: ``None``)
            Optional synapse to filter all of the outputs before solving
            for the linear readout. This is included in the connection to the
            ``output`` Node created within the network.
        radii : scalar or array_like, optional (Default: ``1``)
            The radius of each dimension of the input signal, used to normalize
            the incoming connection weights.
        gain : scalar, optional (Default: ``1.25``)
            A scalar gain on the recurrent connection weight matrix.
        rng : ``numpy.random.RandomState``, optional (Default: ``None``)
            Random state used to initialize all weights.
        neuron_type : ``nengo.neurons.NeuronType``, optional \
                      (Default: ``Tanh()``)
            Neuron model to use within the reservoir.
        include_bias : ``bool`` (Default: ``True``)
            Whether to include a bias current to the neural nonlinearity.
            This should be ``False`` if the neuron model already has a bias,
            e.g., ``LIF`` or ``LIFRate``.
        ens_seed : int, optional (Default: ``None``)
            Seed passed to the ensemble of neurons.
        """

        Network.__init__(self, label, seed, add_to_container)

        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.recurrent_synapse = recurrent_synapse
        self.radii = radii  # TODO: make array or scalar parameter?
        self.gain = gain
        self.rng = np.random if rng is None else rng
        self.neuron_type = neuron_type
        self.include_bias = include_bias

        self.W_in = (self.rng.rand(self.n_neurons, self.dimensions) -
                     0.5) / self.radii
        if self.include_bias:
            self.W_bias = self.rng.rand(self.n_neurons, 1) - 0.5
        else:
            self.W_bias = np.zeros((self.n_neurons, 1))
        self.W = self.rng.rand(self.n_neurons, self.n_neurons) - 0.5
        self.W *= self.gain / max(abs(eig(self.W)[0]))

        with self:
            self.ensemble = nengo.Ensemble(self.n_neurons,
                                           1,
                                           neuron_type=self.neuron_type,
                                           seed=ens_seed,
                                           **ens_kwargs)
            self.input = nengo.Node(size_in=self.dimensions)

            pool = self.ensemble.neurons
            nengo.Connection(self.input,
                             pool,
                             transform=self.W_in,
                             synapse=None)
            nengo.Connection(  # note the bias will be active during training
                nengo.Node(output=1, label="bias"),
                pool,
                transform=self.W_bias,
                synapse=None)
            nengo.Connection(self.ensemble.neurons,
                             pool,
                             transform=self.W,
                             synapse=self.recurrent_synapse)

        Reservoir.__init__(self,
                           self.input,
                           pool,
                           readout_synapse=readout_synapse,
                           network=self)
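A minimal usage sketch (constants illustrative): build the reservoir, train its linear readout on a white-noise signal, then connect from ``esn.output`` in a downstream model.

import nengo

esn = EchoState(n_neurons=500, dimensions=1, ens_seed=0)
process = nengo.processes.WhiteSignal(period=10.0, high=5, seed=0)
decoders, info = esn.train(lambda x: x, t=10.0, dt=0.001, process=process)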
Example #11
class Test(object):
    sp = SynapseParam('sp', default=Lowpass(0.1))
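Exercising the descriptor (a sketch; in nengo, SynapseParam coerces bare numbers into Lowpass synapses and is optional by default):

inst = Test()
assert inst.sp == Lowpass(0.1)   # unset -> the declared default
inst.sp = 0.05                   # numbers become Lowpass(0.05)
assert inst.sp == Lowpass(0.05)
inst.sp = None                   # allowed because the param is optional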
Example #12
class Reservoir(object):
    """A collection of inputs and outputs within some network.

    This class encapsulates:
        - any number of Nengo objects that take some input, e.g. Nodes or
          Ensembles;
        - any number of Nengo objects that produce some output, e.g. Neurons;
        - a network that these objects can be simulated inside;
        - a synapse used to filter the outputs.

    The network can then be trained by injecting a signal to the specified
    inputs, and solving for the optimal linear readout from the outputs.
    After training, the optimal readout will be available by connecting from
    the ``output`` Node in the model.

    This technique is known as reservoir computing, and this class generalizes
    the concept to support NEF dynamics with structured reservoirs, in
    addition to traditional spiking and non-spiking random pools.

    The inputs, outputs, and any internal objects that will vary randomly
    between builds must be given a fixed seed so that they do not differ
    between training and testing.

    See ``doc/notebooks/examples/reservoir.ipynb`` for more information.
    """

    readout_synapse = SynapseParam('readout_synapse')
    _connectable = (NengoObject, ObjView, Neurons)

    def __init__(self, inputs, outputs, readout_synapse=None, network=None):
        """Builds a reservoir containing inputs and outputs.

        Parameters
        ----------
        inputs : nengo.NengoObject, nengo.ObjView, nengo.Neurons, or iterable
            Input (or inputs) within the network, to receive the input signal.
        outputs : nengo.NengoObject, nengo.ObjView, nengo.Neurons, or iterable
            Output (or outputs) within the network, for the linear readout.
        readout_synapse : nengo.synapses.Synapse (Default: ``None``)
            Optional synapse to filter all of the outputs before solving
            for the linear readout. This is included in the connection to the
            ``output`` Node created within the network.
        network : nengo.Network, optional (Default: ``None``)
            The Nengo network that contains all of the inputs and outputs,
            that can be simulated on its own. If ``None`` is supplied, then
            this will automatically use the current network context.
        """

        self.inputs = _to_list(inputs)
        self.outputs = _to_list(outputs)
        self.readout_synapse = readout_synapse

        # Determine dimensionality of reservoir
        self.size_in = 0
        for obj in self.inputs:
            if not isinstance(obj, self._connectable):
                raise TypeError(
                    "inputs (%s) must be connectable Nengo objects" %
                    (inputs, ))

            # Increment input size of reservoir
            self.size_in += obj.size_in

        if self.size_in == 0:
            raise ValueError(
                "inputs (%s) must contain at least one input dimension" %
                (inputs, ))

        self.size_mid = 0
        for obj in self.outputs:
            if not isinstance(obj, self._connectable):
                raise TypeError(
                    "outputs (%s) must be connectable Nengo objects" %
                    (outputs, ))

            # Increment output size of reservoir
            self.size_mid += obj.size_out

        if self.size_mid == 0:
            raise ValueError(
                "outputs (%s) must contain at least one output dimension" %
                (outputs, ))

        # Determine simulation context
        if network is None:
            if not len(nengo.Network.context):
                raise NetworkContextError(
                    "reservoir must be created within a network block if the "
                    "given network parameter is None")
            self.network = nengo.Network.context[-1]
        else:
            self.network = network

        with self.network:
            # Create a node which disperses all of the inputs
            self._proxy_in = nengo.Node(size_in=self.size_in)
            in_used = 0
            for obj in self.inputs:
                nengo.Connection(self._proxy_in[in_used:in_used + obj.size_in],
                                 obj,
                                 synapse=None)
                in_used += obj.size_in
            assert in_used == self.size_in

            # Create a node which collects all of the reservoir outputs
            self._proxy_mid = nengo.Node(size_in=self.size_mid)
            mid_used = 0
            for obj in self.outputs:
                nengo.Connection(obj,
                                 self._proxy_mid[mid_used:mid_used +
                                                 obj.size_out],
                                 synapse=None)
                mid_used += obj.size_out
            assert mid_used == self.size_mid

            # Create a dummy node to hold the eventually learned output
            # It will be the 0 scalar until the train method is called
            self.output = nengo.Node(size_in=1)
            self._readout = nengo.Connection(self._proxy_mid,
                                             self.output,
                                             synapse=self.readout_synapse,
                                             transform=np.zeros(
                                                 (1, self.size_mid)))
            self.size_out = None

    def run(self, t, dt, process, seed=None):
        """Simulate the network on a particular input signal.

        If the network has been trained, this will include the decoded output.

        Parameters
        ----------
        t : float
            A positive number indicating how long the input signal should be
            in simulation seconds.
        dt : float
            A positive number indicating the time elapsed between each
            timestep. The length of each output will be ``int(t / dt)``.
        process : nengo.Process
            An autonomous process that provides a training signal of
            appropriate dimensionality to match the input objects.
        seed : int, optional (Default: ``None``)
            Seed used to initialize the simulator.
        """

        # Set up a sandbox so that the reservoir doesn't keep the
        # input connections and probes added here
        with nengo.Network(add_to_container=False) as sandbox:
            sandbox.add(self.network)

            stim = nengo.Node(output=process, size_out=self.size_in)
            nengo.Connection(stim, self._proxy_in, synapse=None)
            p_in = nengo.Probe(self._proxy_in, synapse=None)
            p_mid = nengo.Probe(self._proxy_mid, synapse=self.readout_synapse)
            p_out = nengo.Probe(self.output, synapse=None)

        with nengo.Simulator(sandbox, dt=dt, seed=seed) as sim:
            sim.run(t, progress_bar=None)

        return sim, (sim.data[p_in], sim.data[p_mid], sim.data[p_out])

    def train(self,
              function,
              t,
              dt,
              process,
              seed=None,
              t_init=0,
              solver=LstsqL2(),
              rng=None):
        """Train an optimal linear readout.

        Afterwards, the decoded and filtered output will be available in the
        model by connecting from the ``output`` Node, or by invoking the
        ``run`` method.

        Parameters
        ----------
        function : callable
            A function that maps the input signal obtained from simulating the
            process (as an ``M``-by-``D`` array, where ``M`` is the number of
            timesteps, and ``D`` is the input dimensionality), to the desired
            signal (of the same shape).
        t : float
            A positive number indicating how long the training signal should be
            in simulation seconds.
        dt : float
            A positive number indicating the time elapsed between each
            timestep. The length of the test signal will be ``int(t / dt)``.
        process : nengo.Process
            An autonomous process that provides a training signal of
            appropriate dimensionality to match the input objects.
        seed : int, optional (Default: ``None``)
            Seed used to initialize the simulator.
        t_init : float, optional (Default: ``0``)
            The number of seconds to discard from the start.
        solver : nengo.solvers.Solver (Default: ``nengo.solvers.LstsqL2()``)
            Solves for ``D`` such that ``AD ~ Y``.
        rng : ``numpy.random.RandomState``, optional (Default: ``None``)
            Random state passed to the solver.
        """

        # Do a safety check for seeds. Note that even if the overall
        # network has a seed, that doesn't necessarily mean everything will
        # be good, because adding components to the network after training
        # may result in seeds being shuffled around within the objects.
        for ens in self.network.all_ensembles:
            if ens.seed is None:
                warnings.warn("reservoir ensemble (%s) should have its own "
                              "seed to help ensure that its parameters do not "
                              "change between training and testing" % ens)

        sim, (data_in, data_mid, _) = self.run(t, dt, process, seed)

        target = np.atleast_1d(function(data_in))
        if target.ndim == 1:
            target = target[:, None]
        if len(data_in) != len(target):
            raise RuntimeError(
                "function expected to return signal of length %d, received %d "
                "instead" % (len(data_in), len(target)))

        offset = int(t_init / dt)
        decoders, info = solver(data_mid[offset:], target[offset:], rng=rng)

        # Update dummy node
        self.output.size_in = self.output.size_out = self.size_out = (
            target.shape[1])
        self._readout.transform = decoders.T

        info.update({'sim': sim, 'data_in': data_in, 'data_mid': data_mid})
        return decoders, info
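A minimal usage sketch (constants illustrative): decode the identity function from a spiking pool.

import nengo

with nengo.Network() as model:
    ens = nengo.Ensemble(100, 1, seed=0)  # fixed seed, per the warning above
    res = Reservoir(ens, ens.neurons, readout_synapse=0.005)

process = nengo.processes.WhiteSignal(period=1.0, high=10, seed=0)
decoders, info = res.train(lambda x: x, t=1.0, dt=0.001, process=process)
# res.output now carries the trained readout in later simulations.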
Example #13
class BCM2(LearningRuleType):
    """Bienenstock-Cooper-Munroe learning rule.
    Modifies connection weights as a function of the presynaptic activity
    and the difference between the postsynaptic activity and the average
    postsynaptic activity.
    Notes
    -----
    The BCM rule is dependent on pre and post neural activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the BCM rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the BCM rule by ``1 / post.n_neurons``.
    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-9)
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional \
                  (Default: ``nengo.synapses.Lowpass(tau=0.005)``)
        Synapse model used to filter the pre-synaptic activities.
    post_synapse : `.Synapse`, optional (Default: ``None``)
        Synapse model used to filter the post-synaptic activities.
        If None, ``post_synapse`` will be the same as ``pre_synapse``.
    theta_synapse : `.Synapse`, optional \
                    (Default: ``nengo.synapses.Lowpass(tau=1.0)``)
        Synapse model used to filter the theta signal.
    max_weights : float, optional (Default: None)
    
    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    theta_synapse : `.Synapse`
        Synapse model used to filter the theta signal.
    """

    modifies = 'weights'
    probeable = ('theta', 'pre_filtered', 'post_filtered', 'delta')

    learning_rate = NumberParam(
        'learning_rate', low=0, readonly=True, default=1e-9)
    pre_synapse = SynapseParam(
        'pre_synapse', default=Lowpass(tau=0.005), readonly=True)
    post_synapse = SynapseParam(
        'post_synapse', default=None, readonly=True)
    theta_synapse = SynapseParam(
        'theta_synapse', default=Lowpass(tau=1.0), readonly=True)
    max_weight = NumberParam(
        'max_weight', optional=True, readonly=True, default=None)
    diagonal0 = BoolParam('diagonal0', readonly=True, default=True)

    def __init__(self, learning_rate=Default, pre_synapse=Default,
                 post_synapse=Default, theta_synapse=Default,
                 max_weight=Default, diagonal0=Default,
                 pre_tau=Unconfigurable, post_tau=Unconfigurable,
                 theta_tau=Unconfigurable):
        super().__init__(learning_rate, size_in=0)

        self.max_weight = max_weight
        self.diagonal0 = diagonal0

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

        if post_tau is Unconfigurable:
            self.post_synapse = (self.pre_synapse if post_synapse is Default
                                 else post_synapse)
        else:
            self.post_tau = post_tau

        if theta_tau is Unconfigurable:
            self.theta_synapse = theta_synapse
        else:
            self.theta_tau = theta_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', BCM2.learning_rate.default),
                ('pre_synapse', BCM2.pre_synapse.default),
                ('post_synapse', self.pre_synapse),
                ('theta_synapse', BCM2.theta_synapse.default),
                ('max_weight', BCM2.max_weight.default),
                ('diagonal0', BCM2.diagonal0.default))
Example #14
class Test:
    sp = SynapseParam("sp", default=Lowpass(0.1))
Example #15
class BCM(LearningRuleType):
    """Bienenstock-Cooper-Munroe learning rule.

    Modifies connection weights as a function of the presynaptic activity
    and the difference between the postsynaptic activity and the average
    postsynaptic activity.

    Notes
    -----
    The BCM rule is dependent on pre and post neural activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the BCM rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the BCM rule by ``1 / post.n_neurons``.

    Parameters
    ----------
    learning_rate : float, optional
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional
        Synapse model used to filter the pre-synaptic activities.
    post_synapse : `.Synapse`, optional
        Synapse model used to filter the post-synaptic activities.
        If None, ``post_synapse`` will be the same as ``pre_synapse``.
    theta_synapse : `.Synapse`, optional
        Synapse model used to filter the theta signal.

    Attributes
    ----------
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    theta_synapse : `.Synapse`
        Synapse model used to filter the theta signal.
    """

    modifies = "weights"
    probeable = ("theta", "pre_filtered", "post_filtered", "delta")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-9)
    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)
    theta_synapse = SynapseParam("theta_synapse",
                                 default=Lowpass(tau=1.0),
                                 readonly=True)

    def __init__(
        self,
        learning_rate=Default,
        pre_synapse=Default,
        post_synapse=Default,
        theta_synapse=Default,
    ):
        super().__init__(learning_rate, size_in=0)

        self.pre_synapse = pre_synapse
        self.post_synapse = (self.pre_synapse
                             if post_synapse is Default else post_synapse)
        self.theta_synapse = theta_synapse

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs,
                                            self.pre_synapse)
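A minimal usage sketch (assuming standard nengo imports): BCM modifies full weight matrices, so it is attached to a neuron-to-neuron connection.

import nengo
import numpy as np

with nengo.Network() as model:
    pre = nengo.Ensemble(10, dimensions=1)
    post = nengo.Ensemble(10, dimensions=1)
    conn = nengo.Connection(
        pre.neurons, post.neurons,
        transform=1e-3 * np.random.RandomState(0).rand(10, 10),
        learning_rule_type=BCM(learning_rate=1e-9))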
Example #16
class Temporal(Solver, SupportDefaultsMixin):
    """Solves for connection weights by accounting for the neural dynamics.

    This allows the optimization procedure to potentially harness any
    correlations in spike-timing between neurons, and/or the adaptive
    dynamics of more detailed neuron models, given the dynamics
    of the desired function with respect to the evaluation points.
    This works by explicitly simulating the neurons given the stimulus, and
    then learning to decode the desired function in the time-domain.

    To use this method, pass it to the ``solver`` parameter for a
    :class:`nengo.Connection`. The ``pre`` object on this connection should be
    a :class:`nengo.Ensemble` that uses some dynamic neuron model.

    Parameters
    ----------
    synapse : :class:`nengo.synapses.Synapse`, optional
        The :class:`nengo.synapses.Synapse` model used to filter the
        pre-synaptic activities of the neurons before being passed to the
        underlying solver. A value of ``None`` will bypass any filtering.
        Defaults to a :class:`nengo.Lowpass` filter with a time-constant of
        5 ms.
    solver : :class:`nengo.solvers.Solver`, optional
        The underlying :class:`nengo.solvers.Solver` used to solve the problem
        ``AD = Y``, where ``A`` are the (potentially filtered) neural
        activities (in response to the evaluation points, over time), ``D``
        are the Nengo decoders, and ``Y`` are the corresponding targets given
        by the ``function`` supplied to the connection.
        Defaults to :class:`nengo.solvers.LstsqL2`.

    See Also
    --------
    :class:`.RLS`
    :class:`nengo.Connection`
    :class:`nengo.solvers.Solver`
    :mod:`.synapses`

    Notes
    -----
    Requires ``nengo>=2.5.0``
    (specifically, `PR #1313 <https://github.com/nengo/nengo/pull/1313>`_).

    If the neuron model for the pre-synaptic population includes some
    internal state that varies over time (which it should, otherwise there is
    little point in using this solver), then the order of the given evaluation
    points will matter. You will likely want to supply them as an array, rather
    than as a distribution. Likewise, you may want to filter your desired
    output, and specify the function as an array on the connection (see example
    below).

    The effect of the solver's regularization has a very different
    interpretation in this context (due to the filtered spiking error having
    its own statistics), and so you may also wish to instantiate the solver
    yourself with some value other than the default regularization.

    Examples
    --------
    Below we use the temporal solver to learn a filtered communication-channel
    (the identity function) using 100 low-threshold spiking (LTS) Izhikevich
    neurons. The training and test data are sampled independently from the
    same band-limited white-noise process.

    >>> from nengolib import Temporal, Network
    >>> import nengo
    >>> neuron_type = nengo.Izhikevich(coupling=0.25)
    >>> tau = 0.005
    >>> process = nengo.processes.WhiteSignal(period=5, high=5, y0=0, rms=0.3)
    >>> eval_points = process.run_steps(5000)
    >>> with Network() as model:
    >>>     stim = nengo.Node(output=process)
    >>>     x = nengo.Ensemble(100, 1, neuron_type=neuron_type)
    >>>     out = nengo.Node(size_in=1)
    >>>     nengo.Connection(stim, x, synapse=None)
    >>>     nengo.Connection(x, out, synapse=None,
    >>>                      eval_points=eval_points,
    >>>                      function=nengo.Lowpass(tau).filt(eval_points),
    >>>                      solver=Temporal(synapse=tau))
    >>>     p_actual = nengo.Probe(out, synapse=tau)
    >>>     p_ideal = nengo.Probe(stim, synapse=tau)
    >>> with nengo.Simulator(model) as sim:
    >>>     sim.run(5)

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(sim.trange(), sim.data[p_actual], label="Actual")
    >>> plt.plot(sim.trange(), sim.data[p_ideal], label="Ideal")
    >>> plt.xlabel("Time (s)")
    >>> plt.legend()
    >>> plt.show()
    """

    synapse = SynapseParam('synapse',
                           default=Lowpass(tau=0.005),
                           readonly=True)
    solver = SolverParam('solver', default=LstsqL2(), readonly=True)

    def __init__(self, synapse=Default, solver=Default):
        # We can't use super here because we need the defaults mixin
        # in order to determine self.solver.weights.
        SupportDefaultsMixin.__init__(self)
        self.synapse = synapse
        self.solver = solver
        Solver.__init__(self, weights=self.solver.weights)

    def __call__(self, A, Y, __hack__=None, **kwargs):
        assert __hack__ is None
        # __hack__ is necessary prior to nengo PR #1359 (<2.6.1)
        # and following nengo PR #1507 (>2.8.0)

        # Note: mul_encoders is never called directly on self.
        # It is invoked on the sub-solver through the following call.
        return self.solver.__call__(A, Y, **kwargs)
Example #17
class LinearNetwork(Network):
    """Network implementing a linear time-invariant (LTI) system.

    This network implements the following linear state-space model:

    .. math::

       \\dot{{\\bf x}}(t) &= A{\\bf x}(t) + B{\\bf u}(t) \\\\
              {\\bf y}(t) &= C{\\bf x}(t) + D{\\bf u}(t)

    This works by first realizing a state-space representation from the
    given ``sys`` and ``realizer``, and then using :func:`.ss2sim` to apply
    a generalization of Principle 3 from the Neural Engineering Framework (NEF)
    to map the system onto the given ``synapse``. This yields a :attr:`.mapped`
    system whose state-space matrices give the transformation matrices for the
    resulting Nengo network.

    Parameters
    ----------
    sys : :data:`linear_system_like`
       Linear system representation.
    n_neurons_per_ensemble : ``integer``
       Number of neurons to use per ensemble (i.e., per dimension).
    synapse : :class:`nengo.synapses.Synapse`
        Recurrent synapse used to implement the dynamics, passed to
        :func:`.ss2sim`.
    dt : ``float`` or ``None``
        Simulation time-step (in seconds), passed to :func:`.ss2sim`.
        If ``None``, then this uses the continuous form of Principle 3
        (i.e., assuming a continuous-time synapse with negligible time-step).
        If provided, then ``sys`` will be discretized and the discrete
        form of Principle 3 will be applied. This should always be given
        for digital simulations.
    radii : ``float`` or ``array_like``, optional
        Radius of each dimension of the realized state-space.
        If a single ``float``, then it will be applied to each dimension.
        If ``array_like``, then its length must match :attr:`.size_state`.
        Defaults to ``1``.
    input_synapse : :class:`nengo.synapses.Synapse`, optional
        Input synapse connecting from :attr:`.input` node. Defaults to ``None``
        to discourage double filtering, but should typically match the
        ``synapse`` parameter.
    output_synapse : :class:`nengo.synapses.Synapse`, optional
        Output synapse connecting to :attr:`.output` node.
        Defaults to ``None``.
    realizer : :class:`.AbstractRealizer`, optional
        Method of obtaining a state-space realization of ``sys``.
        Defaults to :class:`.Hankel`.
    solver : :class:`nengo.solvers.Solver`, optional
        Solver used to decode the state.
        Defaults to :class:`nengo.solvers.LstsqL2` (with ``reg=.1``).
    label : str, optional (Default: None)
        Name of the network.
    seed : int, optional (Default: None)
        Random number seed that will be fed to the random number generator.
        Setting the seed makes the network's build process deterministic.
    add_to_container : bool, optional (Default: None)
        Determines if this network will be added to the current container.
        If None, this network will be added to the network at the top of the
        ``Network.context`` stack unless the stack is empty.
    **ens_kwargs : ``dictionary``, optional
        Additional keyword arguments are passed to the
        :class:`nengo.networks.EnsembleArray` that represents the
        :attr:`.state`.

    See Also
    --------
    :class:`.Network`
    :class:`.RollingWindow`
    :class:`.Hankel`
    :func:`.ss2sim`

    Notes
    -----
    By linearity, the ``input_synapse`` and the ``output_synapse`` are
    interchangeable with one another. However, this will modify the
    state-space (according to these same filters) which may impact the quality
    of representation.

    Examples
    --------
    >>> from nengolib.networks import LinearNetwork
    >>> from nengolib.synapses import Bandpass

    Implementing a 5 Hz :func:`.Bandpass` filter (i.e., a decaying 2D
    oscillator) using 1000 spiking LIF neurons:

    >>> import nengo
    >>> from nengolib import Network
    >>> from nengolib.signal import Balanced
    >>> with Network() as model:
    >>>     stim = nengo.Node(output=lambda t: 100*int(t < .01))
    >>>     sys = LinearNetwork(sys=Bandpass(freq=5, Q=10),
    >>>                         n_neurons_per_ensemble=500,
    >>>                         synapse=.1, dt=1e-3, realizer=Balanced())
    >>>     nengo.Connection(stim, sys.input, synapse=None)
    >>>     p = nengo.Probe(sys.state.output, synapse=.01)
    >>> with nengo.Simulator(model, dt=sys.dt) as sim:
    >>>     sim.run(1.)

    Note there are exactly 5 oscillations within 1 second, in response to a
    saturating impulse:

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(*sim.data[p].T)
    >>> plt.xlabel("$x_1(t)$")
    >>> plt.ylabel("$x_2(t)$")
    >>> plt.axis('equal')
    >>> plt.xlim(-1, 1)
    >>> plt.ylim(-1, 1)
    >>> plt.show()
    """

    synapse = SynapseParam('synapse')
    input_synapse = SynapseParam('input_synapse', optional=True)
    output_synapse = SynapseParam('output_synapse', optional=True)
    dt = NumberParam('dt', low=0, low_open=True, optional=True)

    def __init__(self, sys, n_neurons_per_ensemble, synapse, dt, radii=1.0,
                 input_synapse=None, output_synapse=None,
                 realizer=Hankel(), solver=Default,
                 label=None, seed=None, add_to_container=None, **ens_kwargs):
        super(LinearNetwork, self).__init__(label, seed, add_to_container)

        # Parameter checking
        self.sys = LinearSystem(sys)
        self.n_neurons_per_ensemble = n_neurons_per_ensemble
        self.synapse = synapse
        self.dt = dt
        self.radii = radii
        self.input_synapse = input_synapse
        self.output_synapse = output_synapse
        self.realizer = realizer

        if solver is not Default:
            # https://github.com/nengo/nengo/issues/1044
            solver._hack = random()

        if len(self.sys) == 0:
            raise ValueError("system (%s) is zero order" % self.sys)

        if self.sys.has_passthrough and self.output_synapse is None:
            # the user shouldn't filter the output node themselves. an
            # output synapse should be given so we can do it before the
            # passthrough.
            warnings.warn("output_synapse should be given if the system has "
                          "a passthrough, otherwise filtering the output will "
                          "also filter the passthrough")

        if not self.sys.is_stable:
            # This means certain normalizers won't work, because the worst-case
            # output is now unbounded.
            warnings.warn("system (%s) is not exponentially stable" % self.sys)

        # Obtain state-space transformation and realization
        self._realizer_result = self.realizer(self.sys, self.radii)

        # Map the system onto the synapse
        self._mapped = ss2sim(self.realization, self.synapse, self.dt)

        with self:
            # Create internal Nengo objects
            self._input = nengo.Node(size_in=self.size_in, label="input")
            self._output = nengo.Node(size_in=self.size_out, label="output")

            x_input, x_output = self._make_core(solver, **ens_kwargs)

            # Connect everything up using (A, B, C, D)
            nengo.Connection(
                x_output, x_input, transform=self.A,
                synapse=self.synapse)
            nengo.Connection(
                self.input, x_input, transform=self.B,
                synapse=self.input_synapse)
            nengo.Connection(
                x_output, self.output, transform=self.C,
                synapse=self.output_synapse)

            if not np.allclose(self.D, 0):
                logging.info("Passthrough (%s) on LinearNetwork with sys=%s",
                             self.D, self.sys)
                nengo.Connection(
                    self.input, self.output, transform=self.D,
                    synapse=None)

    def _make_core(self, solver, **ens_kwargs):
        self._state = nengo.networks.EnsembleArray(
            self.n_neurons_per_ensemble, self.size_state,
            ens_dimensions=1, label="x", **ens_kwargs)

        if solver is not Default:
            # https://github.com/nengo/nengo/issues/1040
            self.state.add_output('output', function=None, solver=solver)

        return self.state.input, self.state.output

    @property
    def realizer_result(self):
        """The :class:`.RealizerResult` produced by ``realizer``."""
        return self._realizer_result

    @property
    def realization(self):
        """Realized :class:`.LinearSystem`."""
        return self.realizer_result.realization  # convenience

    @property
    def mapped(self):
        """Mapped :class:`.LinearSystem`."""
        return self._mapped

    @property
    def A(self):
        """``A`` state-space matrix of mapped :class:`.LinearSystem`."""
        return self.mapped.A

    @property
    def B(self):
        """``B`` state-space matrix of mapped :class:`.LinearSystem`."""
        return self.mapped.B

    @property
    def C(self):
        """``C`` state-space matrix of mapped :class:`.LinearSystem`."""
        return self.mapped.C

    @property
    def D(self):
        """``D`` state-space matrix of mapped :class:`.LinearSystem`."""
        return self.mapped.D

    @property
    def size_in(self):
        """Input dimensionality."""
        return self.mapped.size_in

    @property
    def size_state(self):
        """State dimensionality."""
        return len(self.mapped)

    @property
    def size_out(self):
        """Output dimensionality."""
        return self.mapped.size_out

    @property
    def input(self):
        """Nengo object representing the input ``u(t)`` to the system."""
        return self._input

    @property
    def state(self):
        """Nengo object representing the state ``x(t)`` of the system."""
        return self._state

    @property
    def output(self):
        """Nengo object representing the output ``y(t)`` of the system."""
        return self._output
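# A minimal sketch of what ss2sim does in the continuous-time case (dt=None)
# with a Lowpass(tau) synapse: to make the synapse realize dx/dt = Ax + Bu,
# the network instead computes (tau*A + I)x + tau*B*u through the synapse
# (Principle 3). The discrete case is analogous, using the ZOH-discretized
# system. Illustration only, not nengolib's implementation.
import numpy as np

def principle3_lowpass(A, B, tau):
    A = np.asarray(A)
    A_mapped = tau * A + np.eye(A.shape[0])  # recurrent transform
    B_mapped = tau * np.asarray(B)           # input transform
    return A_mapped, B_mapped  # C and D pass through unchanged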
Example #18
0
class RLS(LearningRuleType):
    """Recursive least-squares rule for online decoder optimization.

    This may be used to learn the weights on a :class:`nengo.Connection`,
    online, in an L2-optimal manner. To be applied in the same scenarios as
    :class:`nengo.PES`, to minimize some error signal.

    In the end, the only real difference between RLS learning and using the
    :class:`nengo.solvers.LstsqL2` solver, is *when* the learning takes
    place. In the former case, the weights are learned online from an error
    signal over time, whereas in the latter case, the weights are learned
    offline in a batch optimization from the provided training data
    (``eval_points`` and ``function``).

    The cost of RLS is :math:`\\mathcal{O}\\left(n^2\\right)` extra
    time and memory. It is typically much more efficient to do the learning
    offline using the :class:`nengo.solvers.LstsqL2` solver.

    Parameters
    ----------
    learning_rate : ``float``, optional
        Effective learning rate. This is better understood as
        :math:`\\frac{1}{\\alpha}`, where :math:`\\alpha` is an
        L2-regularization term. A large learning rate means little
        regularization, which implies quick over-fitting. A small learning
        rate means large regularization, which translates to slower
        learning. Defaults to 1.0. [#]_
    pre_synapse : :class:`nengo.synapses.Synapse`, optional
        Filter applied to the pre-synaptic neural activities, for the
        purpose of applying the weight update.
        Defaults to a :class:`nengo.Lowpass` filter with a time-constant of
        5 ms.

    See Also
    --------
    :class:`nengo.PES`
    :class:`nengo.solvers.LstsqL2`
    :class:`.Temporal`

    Notes
    -----
    RLS works by maintaining the inverse neural correlation matrix,
    :math:`\\Gamma^{-1}`, where :math:`\\Gamma = A^T A + \\alpha I` are the
    regularized correlations, :math:`A` is a matrix of (possibly filtered)
    neural activities, and :math:`\\alpha` is an L2-regularization term
    controlled by the ``learning_rate``. This matrix is used to project the
    error signal and update the weights to be L2-optimal, at each time-step.

    The time-step does not play a role in this learning rule, apart from
    determining the time-scale over which the ``pre_synapse`` is discretized.
    A complete learning update is applied on every time-step.

    Attributes that can be probed from this learning rule:
    ``pre_filtered``, ``error``, ``delta``, ``inv_gamma``.

    References
    ----------
    .. [#] Sussillo, D., & Abbott, L. F. (2009). Generating coherent patterns
       of activity from chaotic neural networks. Neuron, 63(4), 544-557.

    Examples
    --------
    See :doc:`notebooks/examples/full_force_learning` for an example of how to
    use RLS to learn spiking FORCE [1]_ and "full-FORCE" networks in Nengo.

    Below, we compare :class:`nengo.PES` against :class:`.RLS`, learning a
    feed-forward communication channel (identity function), online,
    and starting with 100 spiking LIF neurons from scratch (zero weights).
    A faster learning rate for :class:`nengo.PES` results in over-fitting to
    the most recent online example, while a slower learning rate does not
    learn quickly enough. This is a general problem with greedy optimization.
    :class:`.RLS` performs better since it is L2-optimal.

    >>> from nengolib import RLS, Network
    >>> import nengo
    >>> from nengo import PES
    >>> tau = 0.005
    >>> learning_rules = (PES(learning_rate=1e-3, pre_tau=tau),
    >>>                   RLS(learning_rate=1e-5, pre_synapse=tau))

    >>> with Network() as model:
    >>>     u = nengo.Node(output=lambda t: np.sin(2*np.pi*t))
    >>>     probes = []
    >>>     for lr in learning_rules:
    >>>         e = nengo.Node(size_in=1,
    >>>                        output=lambda t, e: e if t < 1 else 0)
    >>>         x = nengo.Ensemble(100, 1, seed=0)
    >>>         y = nengo.Node(size_in=1)
    >>>
    >>>         nengo.Connection(u, e, synapse=None, transform=-1)
    >>>         nengo.Connection(u, x, synapse=None)
    >>>         conn = nengo.Connection(
    >>>             x, y, synapse=None, learning_rule_type=lr,
    >>>             function=lambda _: 0)
    >>>         nengo.Connection(y, e, synapse=None)
    >>>         nengo.Connection(e, conn.learning_rule, synapse=tau)
    >>>         probes.append(nengo.Probe(y, synapse=tau))
    >>>     probes.append(nengo.Probe(u, synapse=tau))

    >>> with nengo.Simulator(model) as sim:
    >>>     sim.run(2.0)

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(sim.trange(), sim.data[probes[0]],
    >>>          label=str(learning_rules[0]))
    >>> plt.plot(sim.trange(), sim.data[probes[1]],
    >>>          label=str(learning_rules[1]))
    >>> plt.plot(sim.trange(), sim.data[probes[2]],
    >>>          label="Ideal", linestyle='--')
    >>> plt.vlines([1], -1, 1, label="Training -> Testing")
    >>> plt.ylim(-2, 2)
    >>> plt.legend(loc='upper right')
    >>> plt.xlabel("Time (s)")
    >>> plt.show()
    """

    modifies = 'decoders'
    probeable = ('pre_filtered', 'error', 'delta', 'inv_gamma')

    pre_synapse = SynapseParam('pre_synapse', readonly=True)

    def __init__(self, learning_rate=1.0, pre_synapse=Lowpass(tau=0.005)):
        if version_info >= (2, 4, 1):
            # https://github.com/nengo/nengo/pull/1310
            super(RLS, self).__init__(learning_rate, size_in='post_state')
        else:  # pragma: no cover
            self.error_type = 'decoded'
            super(RLS, self).__init__(learning_rate)

        self.pre_synapse = pre_synapse

    def __repr__(self):
        return "%s(learning_rate=%r, pre_synapse=%r)" % (
            type(self).__name__, self.learning_rate, self.pre_synapse)
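# A sketch of a single RLS step following the Notes above (an illustration,
# not the simulator's operator). P is the inverse correlation matrix,
# initialized to learning_rate * I (i.e., (1/alpha) * I).
import numpy as np

def rls_step(P, decoders, r, error):
    """r: filtered activities (n,); error: (dims,); decoders: (dims, n)."""
    Pr = P @ r
    k = Pr / (1.0 + r @ Pr)         # gain vector
    P -= np.outer(k, Pr)            # rank-1 update of inverse correlations
    decoders -= np.outer(error, k)  # L2-optimal decoder correction
    return P, decoders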
Example #19
0
class Oja(LearningRuleType):
    """Oja learning rule.

    Modifies connection weights according to the Hebbian Oja rule, which
    augments typically Hebbian coactivity with a "forgetting" term that is
    proportional to the weight of the connection and the square of the
    postsynaptic activity.

    Notes
    -----
    The Oja rule is dependent on pre and post neural activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the Oja rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the Oja rule by ``1 / post.n_neurons``.

    Parameters
    ----------
    learning_rate : float, optional (Default: 1e-6)
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional \
                  (Default: ``nengo.synapses.Lowpass(tau=0.005)``)
        Synapse model used to filter the pre-synaptic activities.
    post_synapse : `.Synapse`, optional (Default: ``None``)
        Synapse model used to filter the post-synaptic activities.
        If None, ``post_synapse`` will be the same as ``pre_synapse``.
    beta : float, optional (Default: 1.0)
        A scalar weight on the forgetting term.

    Attributes
    ----------
    beta : float
        A scalar weight on the forgetting term.
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = 'weights'
    probeable = ('pre_filtered', 'post_filtered', 'delta')

    learning_rate = NumberParam('learning_rate',
                                low=0,
                                readonly=True,
                                default=1e-6)
    pre_synapse = SynapseParam('pre_synapse',
                               default=Lowpass(tau=0.005),
                               readonly=True)
    post_synapse = SynapseParam('post_synapse', default=None, readonly=True)
    beta = NumberParam('beta', low=0, readonly=True, default=1.0)

    pre_tau = _deprecated_tau("pre_tau", "pre_synapse")
    post_tau = _deprecated_tau("post_tau", "post_synapse")

    def __init__(self,
                 learning_rate=Default,
                 pre_synapse=Default,
                 post_synapse=Default,
                 beta=Default,
                 pre_tau=Unconfigurable,
                 post_tau=Unconfigurable):
        super().__init__(learning_rate, size_in=0)

        self.beta = beta

        if pre_tau is Unconfigurable:
            self.pre_synapse = pre_synapse
        else:
            self.pre_tau = pre_tau

        if post_tau is Unconfigurable:
            self.post_synapse = (self.pre_synapse
                                 if post_synapse is Default else post_synapse)
        else:
            self.post_tau = post_tau

    @property
    def _argdefaults(self):
        return (('learning_rate', Oja.learning_rate.default),
                ('pre_synapse', Oja.pre_synapse.default),
                ('post_synapse', self.pre_synapse), ('beta', Oja.beta.default))
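# A sketch of the Oja update described above, per time-step, with
# kappa = learning_rate * dt (an illustration, not the simulator's operator):
# Hebbian coactivity minus a forgetting term scaled by beta.
import numpy as np

def oja_delta(weights, pre_filtered, post_filtered, kappa, beta=1.0):
    hebbian = np.outer(post_filtered, pre_filtered)
    forgetting = beta * (post_filtered ** 2)[:, None] * weights
    return kappa * (hebbian - forgetting)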
Example #20
0
class mPES(LearningRuleType):
    """Memristor-based PES learning rule (mPES).

    Learns connection weights using pairs of positive and negative
    memristors, whose states are probeable as ``pos_memristors`` and
    ``neg_memristors``.
    """

    modifies = "weights"
    probeable = ("error", "activities", "delta", "pos_memristors",
                 "neg_memristors")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-4)
    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    r_max = NumberParam("r_max", readonly=True, default=2.5e8)
    r_min = NumberParam("r_min", readonly=True, default=1e2)

    def __init__(self,
                 initial_conductances_pos,
                 initial_conductances_neg,
                 learning_rate=Default,
                 pre_synapse=Default,
                 r_max=Default,
                 r_min=Default,
                 seed=None):
        super().__init__(learning_rate, size_in="post_state")
        if learning_rate is not Default and learning_rate >= 1.0:
            warnings.warn("This learning rate is very high, and can result "
                          "in floating point errors from too much current.")

        self.initial_conductances_pos = initial_conductances_pos
        self.initial_conductances_neg = initial_conductances_neg

        self.pre_synapse = pre_synapse
        self.r_max = r_max
        self.r_min = r_min

        np.random.seed(seed)  # seeds NumPy's global RNG used for initial resistances

    def normalized_conductance(self, R):
        epsilon = np.finfo(float).eps
        gain = 1e5

        g_curr = 1.0 / R
        g_min = 1.0 / self.r_max
        g_max = 1.0 / self.r_min

        return gain * (((g_curr - g_min) / (g_max - g_min)) + epsilon)

    def initial_normalized_conductances(self, low, high, shape):
        gain = 1e5

        g_curr = 1.0 / np.random.uniform(low, high, shape)
        g_min = 1.0 / self.r_max
        g_max = 1.0 / self.r_min

        return ((g_curr - g_min) / (g_max - g_min)) * gain

    def initial_resistances(self, low, high, shape):
        return np.random.uniform(low, high, shape)

    @property
    def _argdefaults(self):
        return (
            ("learning_rate", PES.learning_rate.default),
            ("pre_synapse", PES.pre_synapse.default),
        )
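# Worked example for normalized_conductance above: with the defaults
# r_min=1e2 and r_max=2.5e8, a memristor at R = 1e4 ohm gives
# g_curr = 1e-4, g_min = 4e-9, g_max = 1e-2, so the result is roughly
# (1e-4 / 1e-2) * 1e5 ~= 1e3 (plus a negligible epsilon term).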
Example #21
0
class Thalamus(Network):
    """Inhibits non-selected actions.

    The thalamus is intended to work in tandem with a `.BasalGanglia` module.
    It converts basal ganglia output into a signal with (approximately) 1 for
    the selected action and 0 elsewhere.

    In order to suppress low responses and strengthen high responses,
    a constant bias is added to each dimension (i.e., action), and dimensions
    mutually inhibit each other. Additionally, the ensemble representing
    each dimension is created with positive encoders and can be assigned
    positive x-intercepts to threshold low responses.

    Parameters
    ----------
    neurons_action : int, optional (Default: 50)
        Number of neurons per action to represent the selection.
    threshold_action : float, optional (Default: 0.2)
        Minimum value for action representation.
    mutual_inhibit : float, optional (Default: 1.0)
        Strength of inhibition between actions.
    route_inhibit : float, optional (Default: 3.0)
        Strength of inhibition for unchosen actions.
    synapse_inhibit : float, optional (Default: 0.008)
        Synaptic filter to apply for inhibition between actions.
    synapse_bg : float, optional (Default: 0.008)
        Synaptic filter for connection between basal ganglia and thalamus.
    synapse_direct : float, optional (Default: 0.01)
        Synaptic filter for direct outputs.
    neurons_channel_dim : int, optional (Default: 50)
        Number of neurons per routing channel dimension.
    synapse_channel : float, optional (Default: 0.01)
        Synaptic filter for channel inputs and outputs.
    neurons_gate : int, optional (Default: 40)
        Number of neurons per gate.
    threshold_gate : float, optional (Default: 0.3)
        Minimum value for gating neurons.
    synapse_to_gate : float, optional (Default: 0.002)
        Synaptic filter for controlling a gate.
    kwargs : dict
        Passed through to `nengo_spa.Network`.

    Attributes
    ----------
    actions : nengo.networks.EnsembleArray
        Each ensemble represents one dimension (action).
    bias : nengo.Node
        The constant bias injected in each *actions* ensemble.
    input : nengo.Node
        Input to the *actions* ensembles.
    output : nengo.Node
        Output from the *actions* ensembles.
    """

    neurons_action = IntParam('neurons_action', default=50)
    threshold_action = NumberParam('threshold_action', default=0.2)
    mutual_inhibit = NumberParam('mutual_inhibit', default=1.)
    route_inhibit = NumberParam('route_inhibit', default=3.)
    synapse_inhibit = SynapseParam('synapse_inhibit', default=Lowpass(0.008))
    synapse_bg = SynapseParam('synapse_bg', default=Lowpass(0.008))
    neurons_channel_dim = IntParam('neurons_channel_dim', default=50)
    synapse_channel = SynapseParam('synapse_channel', default=Lowpass(0.01))
    neurons_gate = IntParam('neurons_gate', default=40)
    threshold_gate = NumberParam('threshold_gate', default=0.3)
    synapse_to_gate = SynapseParam('synapse_to_gate', default=Lowpass(0.002))

    def __init__(self,
                 action_count,
                 neurons_action=Default,
                 threshold_action=Default,
                 mutual_inhibit=Default,
                 route_inhibit=Default,
                 synapse_inhibit=Default,
                 synapse_bg=Default,
                 neurons_channel_dim=Default,
                 synapse_channel=Default,
                 neurons_gate=Default,
                 threshold_gate=Default,
                 synapse_to_gate=Default,
                 **kwargs):
        kwargs.setdefault('label', "Thalamus")
        super(Thalamus, self).__init__(**kwargs)

        self.action_count = action_count
        self.neurons_action = neurons_action
        self.mutual_inhibit = mutual_inhibit
        self.route_inhibit = route_inhibit
        self.synapse_inhibit = synapse_inhibit
        self.threshold_action = threshold_action
        self.neurons_channel_dim = neurons_channel_dim
        self.synapse_channel = synapse_channel
        self.neurons_gate = neurons_gate
        self.threshold_gate = threshold_gate
        self.synapse_to_gate = synapse_to_gate
        self.synapse_bg = synapse_bg

        self.gates = {}  # gating ensembles per action (created as needed)
        self.channels = []  # channels to pass data between networks

        self.gate_in_connections = {}
        self.gate_out_connections = {}
        self.channel_out_connections = []
        self.fixed_connections = {}
        self.bg_connection = None

        with self:
            self.actions = nengo.networks.EnsembleArray(
                self.neurons_action,
                self.action_count,
                intercepts=nengo.dists.Uniform(self.threshold_action, 1),
                encoders=nengo.dists.Choice([[1.0]]),
                label="actions")
            nengo.Connection(self.actions.output,
                             self.actions.input,
                             transform=(np.eye(self.action_count) - 1) *
                             self.mutual_inhibit)
            self.bias = nengo.Node([1], label="thalamus bias")
            nengo.Connection(self.bias,
                             self.actions.input,
                             transform=np.ones((self.action_count, 1)))

        self.input = self.actions.input
        self.output = self.actions.output

    def construct_gate(self, index, bias, label=None):
        """Construct a gate ensemble.

        The gate neurons have no activity when the action is selected, but are
        active when the action is not selected. This makes the gate useful for
        inhibiting ensembles that should only be active when this action is
        active.

        Parameters
        ----------
        index : int
            Index to identify the gate.
        bias : :class:`nengo.Node`
            Node providing a bias input of 1.
        label : str, optional
            Label for the gate.

        Returns
        -------
        nengo.Ensemble
            The constructed gate.
        """
        if label is None:
            label = 'gate[%d]' % index
        intercepts = Uniform(self.threshold_gate, 1)
        self.gates[index] = gate = nengo.Ensemble(self.neurons_gate,
                                                  dimensions=1,
                                                  intercepts=intercepts,
                                                  label=label,
                                                  encoders=[[1]] *
                                                  self.neurons_gate)
        nengo.Connection(bias, gate, synapse=None)

        self.gate_in_connections[index] = nengo.Connection(
            self.actions.ensembles[index],
            self.gates[index],
            synapse=self.synapse_to_gate,
            transform=-1)

        return self.gates[index]

    def construct_channel(self, sink, type_, label=None):
        """Construct a channel.

        A channel is an additional neural population inserted between a
        source population and a target population. This allows the channel
        to be inhibited without affecting the source, which is useful for
        routing information.

        Parameters
        ----------
        sink : nengo.base.NengoObject
            Sink/target that the channel feeds into.
        type_ : nengo_spa.types.Type
            Type of the data transmitted through the channel.
        label : str, optional
            Label for the channel.

        Returns
        -------
        :class:`nengo.networks.EnsembleArray`
            The constructed channel.
        """
        if label is None:
            label = 'channel'
        if type_ == TScalar:
            channel = dynamic.ScalarRealization()
        else:
            channel = dynamic.StateRealization(vocab=type_.vocab)

        self.channels.append(channel)
        self.channel_out_connections.append(
            nengo.Connection(channel.output,
                             sink,
                             synapse=self.synapse_channel))
        return channel

    def connect_bg(self, bg):
        """Connect a basal ganglia network to this thalamus."""
        self.bg_connection = nengo.Connection(bg.output,
                                              self.input,
                                              synapse=self.synapse_bg)

    def connect_gate(self, index, channel):
        """Connect a gate to a channel for information routing.

        Parameters
        ----------
        index : int
            Index of the gate to connect.
        channel : nengo.networks.EnsembleArray
            Channel to inhibit with the gate.
        """
        if isinstance(channel, Scalar):
            target = channel.scalar.neurons
        elif isinstance(channel, State):
            target = channel.state_ensembles.add_neuron_input()
        else:
            raise NotImplementedError()

        inhibit = ([[-self.route_inhibit]] * (target.size_in))
        self.gate_out_connections[index] = nengo.Connection(
            self.gates[index],
            target,
            transform=inhibit,
            synapse=self.synapse_inhibit)

    def connect_fixed(self, index, target, transform):
        """Create connection to route fixed value.

        Parameters
        ----------
        index : int
            Index of the action to connect.
        target : nengo.base.NengoObject
            Target of the connection.
        transform : array-like
            Transform to apply to the connection.
        """
        self.fixed_connections[index] = self.connect(
            self.actions.ensembles[index], target, transform)

    def connect(self, source, target, transform):
        """Create connection.

        The connection will use the thalamus's *synapse_channel*.

        Parameters
        ----------
        source : nengo.base.NengoObject
            Source object.
        target : nengo.base.NengoObject
            Target object.
        transform : array-like
            Transform to apply to the connection.
        """
        return nengo.Connection(source,
                                target,
                                transform=transform,
                                synapse=self.synapse_channel)
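# Usage sketch (hypothetical wiring, following nengo_spa's action-selection
# system): pair the thalamus with a basal ganglia of the same action_count.
#
#     bg = BasalGanglia(action_count=3)
#     thalamus = Thalamus(action_count=3)
#     thalamus.connect_bg(bg)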
Example #22
0
class Test(object):
    sp = SynapseParam(default=nengo.Lowpass(0.1))
Example #23
0
class RLS(LearningRuleType):
    r"""Recursive least-squares rule for online decoder optimization.

    This implements an online version of the standard least-squares solvers used
    to learn connection weights offline (e.g. `nengo.solvers.LstsqL2`). It can be
    applied in the same scenarios as `.PES`, to minimize an error signal.

    The cost of RLS is :math:`\mathcal{O}(n^2)` extra time and memory. If possible,
    it is more efficient to do the learning offline using e.g. `.LstsqL2`.

    Parameters
    ----------
    learning_rate : float, optional
        Effective learning rate. This is better understood as
        :math:`\frac{1}{\alpha}`, where :math:`\alpha` is an L2-regularization
        term. A large learning rate means little regularization, which implies
        quick over-fitting. A small learning rate means large regularization,
        which translates to slower learning [#]_.
    pre_synapse : Synapse, optional
        Synapse model applied to the pre-synaptic neural activities.

    See Also
    --------
    nengo.PES
    nengo.solvers.LstsqL2

    Notes
    -----
    RLS works by maintaining the inverse neural correlation matrix,
    :math:`P = \Gamma^{-1}`, where :math:`\Gamma = A^T A + \alpha I` are the
    regularized correlations, :math:`A` is a matrix of (possibly filtered)
    neural activities, and :math:`\alpha` is an L2-regularization term
    controlled by the ``learning_rate``. :math:`P` is used to project the
    error signal and update the weights each time-step.

    References
    ----------
    .. [#] Sussillo, D., & Abbott, L. F. (2009). Generating coherent patterns
       of activity from chaotic neural networks. Neuron, 63(4), 544-557.

    Examples
    --------
    Below, we compare `.PES` against `.RLS`, learning a feed-forward
    communication channel (identity function) online, starting with 100 spiking
    LIF neurons with decoders (weights) set to zero. A faster learning rate for
    `.PES` results in over-fitting to the most recent online example, while a
    slower learning rate does not learn quickly enough. This is a general problem
    with greedy optimization. `.RLS` performs better since it is L2-optimal.

    .. testcode::

       from nengo.learning_rules import PES, RLS

       tau = 0.005
       learning_rules = (
           PES(learning_rate=1e-3, pre_synapse=tau),
           RLS(learning_rate=1e-3, pre_synapse=tau),
       )

       with nengo.Network() as model:
           u = nengo.Node(output=lambda t: np.sin(2 * np.pi * t))
           probes = []
           for lr in learning_rules:
               e = nengo.Node(size_in=1, output=lambda t, e: e if t < 1 else 0)
               x = nengo.Ensemble(100, 1, seed=0)
               y = nengo.Node(size_in=1)

               nengo.Connection(u, e, synapse=None, transform=-1)
               nengo.Connection(u, x, synapse=None)
               conn = nengo.Connection(
                   x, y, synapse=None, learning_rule_type=lr, function=lambda x: 0
               )
               nengo.Connection(y, e, synapse=None)
               nengo.Connection(e, conn.learning_rule, synapse=tau)
               probes.append(nengo.Probe(y, synapse=tau))
           probes.append(nengo.Probe(u, synapse=tau))

       with nengo.Simulator(model) as sim:
           sim.run(2.0)

       plt.plot(sim.trange(), sim.data[probes[0]], label=str(learning_rules[0]))
       plt.plot(sim.trange(), sim.data[probes[1]], label=str(learning_rules[1]))
       plt.plot(sim.trange(), sim.data[probes[2]], label="Ideal", linestyle="--")
       plt.vlines([1], -1, 1, label="Training -> Testing")
       plt.ylim(-2, 2)
       plt.legend(loc="upper right")
       plt.xlabel("Time (s)")

    .. testoutput::
      :hide:

      ...
    """

    modifies = "decoders"
    probeable = ("pre_filtered", "error", "delta", "inv_gamma")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-3)
    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)

    def __init__(self, learning_rate=Default, pre_synapse=Default):
        super().__init__(learning_rate=learning_rate, size_in="post_state")
        self.pre_synapse = pre_synapse
Example #24
0
class Test:
    sp = SynapseParam('sp', default=Lowpass(0.1))
Example #25
0
class Oja(LearningRuleType):
    """Oja learning rule.

    Modifies connection weights according to the Hebbian Oja rule, which
    augments typically Hebbian coactivity with a "forgetting" term that is
    proportional to the weight of the connection and the square of the
    postsynaptic activity.

    Notes
    -----
    The Oja rule is dependent on pre and post neural activities,
    not decoded values, and so is not affected by changes in the
    size of pre and post ensembles. However, if you are decoding from
    the post ensemble, the Oja rule will have an increased effect on
    larger post ensembles because more connection weights are changing.
    In these cases, it may be advantageous to scale the learning rate
    on the Oja rule by ``1 / post.n_neurons``.

    Parameters
    ----------
    learning_rate : float, optional
        A scalar indicating the rate at which weights will be adjusted.
    pre_synapse : `.Synapse`, optional
        Synapse model used to filter the pre-synaptic activities.
    post_synapse : `.Synapse`, optional
        Synapse model used to filter the post-synaptic activities.
        If None, ``post_synapse`` will be the same as ``pre_synapse``.
    beta : float, optional
        A scalar weight on the forgetting term.

    Attributes
    ----------
    beta : float
        A scalar weight on the forgetting term.
    learning_rate : float
        A scalar indicating the rate at which weights will be adjusted.
    post_synapse : `.Synapse`
        Synapse model used to filter the post-synaptic activities.
    pre_synapse : `.Synapse`
        Synapse model used to filter the pre-synaptic activities.
    """

    modifies = "weights"
    probeable = ("pre_filtered", "post_filtered", "delta")

    learning_rate = NumberParam("learning_rate",
                                low=0,
                                readonly=True,
                                default=1e-6)
    pre_synapse = SynapseParam("pre_synapse",
                               default=Lowpass(tau=0.005),
                               readonly=True)
    post_synapse = SynapseParam("post_synapse", default=None, readonly=True)
    beta = NumberParam("beta", low=0, readonly=True, default=1.0)

    def __init__(
        self,
        learning_rate=Default,
        pre_synapse=Default,
        post_synapse=Default,
        beta=Default,
    ):
        super().__init__(learning_rate, size_in=0)

        self.beta = beta
        self.pre_synapse = pre_synapse
        self.post_synapse = (self.pre_synapse
                             if post_synapse is Default else post_synapse)

    @property
    def _argreprs(self):
        return _remove_default_post_synapse(super()._argreprs,
                                            self.pre_synapse)
Example #26
0
class Probe(NengoObject):
    """A probe is an object that receives data from the simulation.

    This is to be used in any situation where you wish to gather simulation
    data (spike data, represented values, neuron voltages, etc.) for analysis.

    Probes cannot directly affect the simulation.

    All Nengo objects can be probed (except Probes themselves).
    Each object has different attributes that can be probed.
    To see what is probeable for each object, print its `probeable` attribute.

    >>> with nengo.Network():
    ...     ens = nengo.Ensemble(10, 1)
    >>> print(ens.probeable)
    ['decoded_output', 'input']

    Parameters
    ----------
    target : Ensemble, Node, Connection
        The Nengo object to connect to the probe.
    attr : str, optional
        The quantity to probe. Refer to the target's ``probeable`` list for
        details. Defaults to the first element in the list.
    sample_every : float, optional
        Sampling period in seconds.
    synapse : float, optional
        Post-synaptic time constant (PSTC) to use for filtering. Default is
        no filtering.
    solver : Solver, optional
        Instance of a Solver class to compute decoders for probes that require
        them (see `nengo.solvers`). Defaults to the same solver as Connection.
    seed : int
        The seed used for random number generation in the Connection.
    label : str, optional
        A name for the probe. Used for debugging and visualization.
    """

    target = TargetParam(nonzero_size_out=True)
    attr = AttributeParam(default=None)
    sample_every = NumberParam(default=None, optional=True, low=1e-10)
    synapse = SynapseParam(default=None)
    solver = ProbeSolverParam(default=ConnectionDefault)
    seed = IntParam(default=None, optional=True)
    label = StringParam(default=None, optional=True)

    def __init__(self,
                 target,
                 attr=None,
                 sample_every=Default,
                 synapse=Default,
                 solver=Default,
                 seed=Default,
                 label=Default):
        self.target = target
        self.attr = attr if attr is not None else self.obj.probeable[0]
        self.sample_every = sample_every
        self.synapse = synapse
        self.solver = solver
        self.seed = seed
        self.label = label

    @property
    def obj(self):
        return (self.target.obj
                if isinstance(self.target, ObjView) else self.target)

    @property
    def slice(self):
        return (self.target.slice
                if isinstance(self.target, ObjView) else slice(None))

    @property
    def size_in(self):
        return self.target.size_out

    @property
    def size_out(self):
        return 0

    def __str__(self):
        return "<Probe%s of '%s' of %s>" % ("" if self.label is None else
                                            ' "%s"' % self.label, self.attr,
                                            self.target)

    def __repr__(self):
        return "<Probe%s at 0x%x of '%s' of %s>" % (
            "" if self.label is None else ' "%s"' % self.label, id(self),
            self.attr, self.target)
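# Usage sketch: filter a probe with a 10 ms lowpass and subsample every 5 ms
# (illustrative values; both arguments are optional).
import nengo

with nengo.Network():
    ens = nengo.Ensemble(50, dimensions=1)
    probe = nengo.Probe(ens, synapse=0.01, sample_every=0.005)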
Example #27
0
class BasalGanglia(Network):
    """Winner take all network, typically used for action selection.

    The basal ganglia network outputs approximately 0 at the dimension with
    the largest value, and is negative elsewhere.

    While the basal ganglia is primarily defined by its winner-take-all
    function, it is also organized to match the organization of the human
    basal ganglia. It consists of five ensembles:

    * Striatal D1 dopamine-receptor neurons (*strD1*)
    * Striatal D2 dopamine-receptor neurons (*strD2*)
    * Subthalamic nucleus (*stn*)
    * Globus pallidus internus / substantia nigra reticulata (*gpi*)
    * Globus pallidus externus (*gpe*)

    Interconnections between these areas are also based on known
    neuroanatomical connections. See [1]_ for more details, and [2]_ for
    the original non-spiking basal ganglia model by
    Gurney, Prescott & Redgrave that this model is based on.

    .. note:: The default `nengo.solvers.Solver` for the basal ganglia is
              `nengo.solvers.NnlsL2nz`, which requires SciPy. If SciPy is not
              installed, the global default solver will be used instead.

    Parameters
    ----------
    action_count : int
        Number of actions.
    n_neurons_per_ensemble : int, optional
        Number of neurons in each ensemble in the network.
    output_weight : float, optional
        A scaling factor on the output of the basal ganglia
        (specifically on the connection out of the GPi).
    input_bias : float, optional
        An amount by which to bias all dimensions of the input node.
        Biasing the input node is important for ensuring that all input
        dimensions are positive and easily comparable.
    ampa_synapse : Synapse, optional
        Synapse for connections corresponding to biological connections
        to AMPA receptors (i.e., connections from STN to GPi and GPe).
    gaba_synapse : Synapse, optional
        Synapse for connections corresponding to biological connections
        to GABA receptors (i.e., connections from StrD1 to GPi, StrD2 to GPe,
        and GPe to GPi and STN).
    kwargs
        Passed through to `nengo_spa.Network`.

    Attributes
    ----------
    bias_input : nengo.Node or None
        If *input_bias* is non-zero, this node will be created to bias
        all of the dimensions of the input signal.
    gpe : nengo.networks.EnsembleArray
        Globus pallidus externus ensembles.
    gpi : nengo.networks.EnsembleArray
        Globus pallidus internus ensembles.
    input : nengo.Node
        Accepts the input signal.
    output : nengo.Node
        Provides the output signal.
    stn : nengo.networks.EnsembleArray
        Subthalamic nucleus ensembles.
    strD1 : nengo.networks.EnsembleArray
        Striatal D1 ensembles.
    strD2 : nengo.networks.EnsembleArray
        Striatal D2 ensembles.

    References
    ----------
    .. [1] Stewart, T. C., Choo, X., & Eliasmith, C. (2010).
       Dynamic behaviour of a spiking model of action selection in the
       basal ganglia. In Proceedings of the 10th international conference on
       cognitive modeling (pp. 235-40).
    .. [2] Gurney, K., Prescott, T., & Redgrave, P. (2001).
       A computational model of action selection in the basal
       ganglia. Biological Cybernetics 84, 401-423.
    """

    input_synapse = SynapseParam('input_synapse', default=Lowpass(0.002))
    ampa_synapse = SynapseParam('ampa_synapse', default=Lowpass(0.002))
    gaba_synapse = SynapseParam('gaba_synapse', default=Lowpass(0.008))
    n_neurons_per_ensemble = IntParam('n_neurons_per_ensemble',
                                      default=100,
                                      low=1,
                                      readonly=True)
    output_weight = NumberParam('output_weight', default=-3., readonly=True)
    input_bias = NumberParam('input_bias', default=0., readonly=True)

    def __init__(self,
                 action_count,
                 n_neurons_per_ensemble=Default,
                 output_weight=Default,
                 input_bias=Default,
                 ampa_synapse=Default,
                 gaba_synapse=Default,
                 sBCBG_params=None,
                 **kwargs):

        kwargs.setdefault('label', "Basal ganglia")
        super(BasalGanglia, self).__init__(**kwargs)

        self.action_count = action_count
        self.input_connections = {}
        self.input_bias = input_bias

        if BasalGanglia.sBCBG:  # class-level flag selecting the detailed sBCBG model

            # parameters
            filter_tau = .01
            if output_weight is Default:
                self.output_weight = -0.001

            import sBCBG
            if sBCBG_params is None:
                sBCBG_params = {}
            sBCBG.nengo_instantiate(self.action_count, self, sBCBG_params)

            with self:

                # connect input to CSN
                self.input = nengo.Node(label="input",
                                        size_in=self.action_count)
                scale = nengo.Ensemble(100, self.input.size_out)
                nengo.Connection(self.input, scale)
                for d in range(self.action_count):
                    nengo.Connection(scale[d],
                                     self.pops['CSN'][d],
                                     function=lambda x: 10 * x,
                                     synapse=.01,
                                     label='CSN input')

                # add bias input (BG performs best in the range 0.5--1.5)
                if abs(self.input_bias) > 0.0:
                    self.bias_input = nengo.Node(np.ones(self.action_count) *
                                                 input_bias,
                                                 label="basal ganglia bias")
                    nengo.Connection(self.bias_input, self.input)

                # connect GPi to output (inhibitory)
                decoding_weight = 1  # scaling of decoding GPi->out
                self.output = nengo.Node(label="output",
                                         size_in=self.action_count)
                for d in range(self.action_count):
                    GPi_ens = self.pops["GPi"][d]
                    decoder_values = np.ones(
                        (GPi_ens.n_neurons, 1)) * decoding_weight
                    nengo.Connection(
                        GPi_ens,
                        self.output[d],
                        synapse=nengo.synapses.Lowpass(filter_tau),
                        transform=self.output_weight,
                        solver=nengo.solvers.NoSolver(decoder_values))

        else:
            self.n_neurons_per_ensemble = n_neurons_per_ensemble
            self.ampa_synapse = ampa_synapse
            self.gaba_synapse = gaba_synapse
            self.output_weight = output_weight

            # Affects all ensembles / connections in the BG
            # unless overwritten with general_config
            config = nengo.Config(nengo.Ensemble, nengo.Connection)
            config[nengo.Ensemble].radius = 1.5
            config[nengo.Ensemble].encoders = nengo.dists.Choice([[1]])
            try:
                # Best, if we have SciPy
                config[nengo.Connection].solver = nengo.solvers.NnlsL2nz()
            except ImportError:
                warnings.warn("SciPy is not installed, so BasalGanglia will "
                              "use the default decoder solver. Installing "
                              "SciPy may improve BasalGanglia performance.")

            ea_params = {
                'n_neurons': self.n_neurons_per_ensemble,
                'n_ensembles': self.action_count
            }

            with self, config:
                self.strD1 = EnsembleArray(label="Striatal D1 neurons",
                                           intercepts=nengo.dists.Uniform(
                                               Weights.e, 1),
                                           **ea_params)
                self.strD2 = EnsembleArray(label="Striatal D2 neurons",
                                           intercepts=nengo.dists.Uniform(
                                               Weights.e, 1),
                                           **ea_params)
                self.stn = EnsembleArray(label="Subthalamic nucleus",
                                         intercepts=nengo.dists.Uniform(
                                             Weights.ep, 1),
                                         **ea_params)
                self.gpi = EnsembleArray(label="Globus pallidus internus",
                                         intercepts=nengo.dists.Uniform(
                                             Weights.eg, 1),
                                         **ea_params)
                self.gpe = EnsembleArray(label="Globus pallidus externus",
                                         intercepts=nengo.dists.Uniform(
                                             Weights.ee, 1),
                                         **ea_params)

                self.input = nengo.Node(label="input",
                                        size_in=self.action_count)
                self.output = nengo.Node(label="output",
                                         size_in=self.action_count)

                # add bias input (BG performs best in the range 0.5--1.5)
                if abs(self.input_bias) > 0.0:
                    self.bias_input = nengo.Node(np.ones(self.action_count) *
                                                 self.input_bias,
                                                 label="basal ganglia bias")
                    nengo.Connection(self.bias_input, self.input)

                # spread the input to StrD1, StrD2, and STN
                nengo.Connection(self.input,
                                 self.strD1.input,
                                 synapse=None,
                                 transform=Weights.ws * (1 + Weights.lg))
                nengo.Connection(self.input,
                                 self.strD2.input,
                                 synapse=None,
                                 transform=Weights.ws * (1 - Weights.le))
                nengo.Connection(self.input,
                                 self.stn.input,
                                 synapse=None,
                                 transform=Weights.wt)

                # connect the striatum to the GPi and GPe (inhibitory)
                strD1_output = self.strD1.add_output('func_str',
                                                     Weights.str_func)
                strD2_output = self.strD2.add_output('func_str',
                                                     Weights.str_func)
                self.gaba = nengo.Network("GABAergic connections")
                self.gaba.config[nengo.Connection].synapse = self.gaba_synapse
                with self.gaba:
                    nengo.Connection(strD1_output,
                                     self.gpi.input,
                                     transform=-Weights.wm)
                    nengo.Connection(strD2_output,
                                     self.gpe.input,
                                     transform=-Weights.wm)

                # connect the STN to GPi and GPe (broad and excitatory)
                tr = Weights.wp * np.ones(
                    (self.action_count, self.action_count))
                stn_output = self.stn.add_output('func_stn', Weights.stn_func)
                self.ampa = nengo.Network("AMPAergic connections")
                self.ampa.config[nengo.Connection].synapse = self.ampa_synapse
                with self.ampa:
                    nengo.Connection(stn_output, self.gpi.input, transform=tr)
                    nengo.Connection(stn_output, self.gpe.input, transform=tr)

                # connect the GPe to GPi and STN (inhibitory)
                gpe_output = self.gpe.add_output('func_gpe', Weights.gpe_func)
                with self.gaba:
                    nengo.Connection(gpe_output,
                                     self.gpi.input,
                                     transform=-Weights.we)
                    nengo.Connection(gpe_output,
                                     self.stn.input,
                                     transform=-Weights.wg)

                # connect GPi to output (inhibitory)
                gpi_output = self.gpi.add_output('func_gpi', Weights.gpi_func)
                nengo.Connection(gpi_output,
                                 self.output,
                                 synapse=None,
                                 transform=self.output_weight)

    def connect_input(self, source, transform=Default, index=None):
        """Connect *source* to ``input[index]`` using *input_synapse*."""
        self.input_connections[index] = nengo.Connection(
            source,
            self.input[index],
            transform=transform,
            synapse=self.input_synapse)
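# Usage sketch (illustrative values; assumes the default, non-sBCBG path):
# present utilities in the recommended 0.5--1.5 range and read the output,
# which is ~0 for the winning action and negative elsewhere.
import nengo

with nengo.Network():
    bg = BasalGanglia(action_count=3)
    utilities = nengo.Node([0.8, 0.6, 0.5])
    nengo.Connection(utilities, bg.input, synapse=None)
    selection = nengo.Probe(bg.output, synapse=0.01)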
Example #28
0
class Connection(NengoObject):
    """Connects two objects together.

    The connection between the two object is unidirectional,
    transmitting information from the first argument, ``pre``,
    to the second argument, ``post``.

    Almost any Nengo object can act as the pre or post side of a connection.
    Additionally, you can use Python slice syntax to access only some of the
    dimensions of the pre or post object.

    For example, if ``node`` has ``size_out=2`` and ``ensemble`` has
    ``size_in=1``, we could not create the following connection::

        nengo.Connection(node, ensemble)

    But, we could create either of these two connections::

        nengo.Connection(node[0], ensemble)
        nengo.Connection(node[1], ensemble)

    Parameters
    ----------
    pre : Ensemble or Neurons or Node
        The source Nengo object for the connection.
    post : Ensemble or Neurons or Node or Probe
        The destination object for the connection.
    synapse : Synapse or None, optional
        Synapse model to use for filtering (see `~nengo.synapses.Synapse`).
        If *None*, no synapse will be used and information will be transmitted
        without any delay (if supported by the backend---some backends may
        introduce a single time step delay).

        Note that at least one connection must have a synapse that is not
        *None* if components are connected in a cycle. Furthermore, a synaptic
        filter with a zero time constant is different from a *None* synapse
        as a synaptic filter will always add a delay of at least one time step.
    function : callable or (n_eval_points, size_mid) array_like, optional
        Function to compute across the connection. Note that ``pre`` must be
        an ensemble to apply a function across the connection.
        If an array is passed, the function is implicitly defined by the
        points in the array and the provided ``eval_points``, which have a
        one-to-one correspondence.
    transform : (size_out, size_mid) array_like, optional
        Linear transform mapping the pre output to the post input.
        This transform is in terms of the sliced size; if either pre
        or post is a slice, the transform must be shaped according to
        the sliced dimensionality. Additionally, the function is applied
        before the transform, so if a function is computed across the
        connection, the transform must be of shape ``(size_out, size_mid)``.
    solver : Solver, optional
        Solver instance to compute decoders or weights
        (see `~nengo.solvers.Solver`). If ``solver.weights`` is True, a full
        connection weight matrix is computed instead of decoders.
    learning_rule_type : LearningRuleType or iterable of LearningRuleType, optional
        Modifies the decoders or connection weights during simulation.
    eval_points : (n_eval_points, size_in) array_like or int, optional
        Points at which to evaluate ``function`` when computing decoders,
        spanning the interval (-pre.radius, pre.radius) in each dimension.
        If None, will use the eval_points associated with ``pre``.
    scale_eval_points : bool, optional
        Indicates whether the evaluation points should be scaled
        by the radius of the pre Ensemble.
    label : str, optional
        A descriptive label for the connection.
    seed : int, optional
        The seed used for random number generation.

    Attributes
    ----------
    is_decoded : bool
        True if and only if the connection is decoded. This will not occur
        when ``solver.weights`` is True or both pre and post are
        `~nengo.ensemble.Neurons`.
    function : callable
        The given function.
    function_size : int
        The output dimensionality of the given function. If no function is
        specified, function_size will be 0.
    label : str
        A human-readable connection label for debugging and visualization.
        If not overridden, incorporates the labels of the pre and post objects.
    learning_rule_type : instance or list or dict of LearningRuleType, optional
        The learning rule types.
    post : Ensemble or Neurons or Node or Probe or ObjView
        The given post object.
    post_obj : Ensemble or Neurons or Node or Probe
        The underlying post object, even if ``post`` is an ``ObjView``.
    post_slice : slice or list or None
        The slice associated with ``post`` if it is an ObjView, or None.
    pre : Ensemble or Neurons or Node or ObjView
        The given pre object.
    pre_obj : Ensemble or Neurons or Node
        The underlying pre object, even if ``pre`` is an ``ObjView``.
    pre_slice : slice or list or None
        The slice associated with ``pre`` if it is an ObjView, or None.
    seed : int
        The seed used for random number generation.
    solver : Solver
        The Solver instance that will be used to compute decoders or weights
        (see ``nengo.solvers``).
    synapse : Synapse
        The Synapse model used for filtering across the connection
        (see ``nengo.synapses``).
    transform : (size_out, size_mid) array_like
        Linear transform mapping the pre function output to the post input.

    Properties
    ----------
    size_in : int
        The number of output dimensions of the pre object.
        Also the input size of the function, if one is specified.
    size_mid : int
        The number of output dimensions of the function, if specified.
        If the function is not specified, then ``size_in == size_mid``.
    size_out : int
        The number of input dimensions of the post object.
        Also the number of output dimensions of the transform.
    """

    probeable = ("output", "input", "weights")

    pre = PrePostParam("pre", nonzero_size_out=True)
    post = PrePostParam("post", nonzero_size_in=True)
    synapse = SynapseParam("synapse", default=Lowpass(tau=0.005))
    function_info = ConnectionFunctionParam("function",
                                            default=None,
                                            optional=True)
    transform = ConnectionTransformParam("transform", default=1.0)
    solver = ConnectionSolverParam("solver", default=LstsqL2())
    learning_rule_type = ConnectionLearningRuleTypeParam("learning_rule_type",
                                                         default=None,
                                                         optional=True)
    eval_points = EvalPointsParam("eval_points",
                                  default=None,
                                  optional=True,
                                  sample_shape=("*", "size_in"))
    scale_eval_points = BoolParam("scale_eval_points", default=True)
    modulatory = ObsoleteParam(
        "modulatory",
        "Modulatory connections have been removed. "
        "Connect to a learning rule instead.",
        since="v2.1.0",
        url="https://github.com/nengo/nengo/issues/632#issuecomment-71663849",
    )

    _param_init_order = [
        "pre",
        "post",
        "synapse",
        "eval_points",
        "function_info",
        "transform",
        "solver",
        "learning_rule_type",
    ]

    def __init__(
        self,
        pre,
        post,
        synapse=Default,
        function=Default,
        transform=Default,
        solver=Default,
        learning_rule_type=Default,
        eval_points=Default,
        scale_eval_points=Default,
        label=Default,
        seed=Default,
        modulatory=Unconfigurable,
    ):
        super().__init__(label=label, seed=seed)

        self.pre = pre
        self.post = post

        self.synapse = synapse
        self.eval_points = eval_points  # Must be set before function
        self.scale_eval_points = scale_eval_points
        self.function_info = function
        self.transform = transform  # Must be set after function
        self.solver = solver  # Must be set before learning rule
        self.learning_rule_type = learning_rule_type  # set after transform
        self.modulatory = modulatory

    def __str__(self):
        return self._str(include_id=False)

    def __repr__(self):
        return self._str(include_id=True)

    def _str(self, include_id):
        desc = "<Connection "
        if include_id:
            desc += "at 0x%x " % id(self)

        if self.label is None:
            desc += "from %s to %s%s" % (
                self.pre,
                self.post,
                ("" if self.function is None else " computing '%s'" %
                 (function_name(self.function))),
            )
        else:
            desc += self.label

        desc += ">"

        return desc

    @property
    def function(self):
        return self.function_info.function

    @function.setter
    def function(self, function):
        self.function_info = function

    @property
    def is_decoded(self):
        return not (self.solver.weights or
                    (isinstance(self.pre_obj, Neurons)
                     and isinstance(self.post_obj, Neurons)))

    @property
    def _label(self):
        if self.label is not None:
            return self.label

        return "from %s to %s%s" % (
            self.pre,
            self.post,
            " computing '%s'" %
            function_name(self.function) if self.function is not None else "",
        )

    @property
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is None:
            return None

        types = self.learning_rule_type
        if isinstance(types, dict):
            learning_rule = type(types)()  # dict of same type
            for k, v in types.items():
                learning_rule[k] = LearningRule(self, v)
        elif is_iterable(types):
            learning_rule = [LearningRule(self, v) for v in types]
        elif isinstance(types, LearningRuleType):
            learning_rule = LearningRule(self, types)
        else:
            raise ValidationError(
                "Invalid type %r" % type(types).__name__,
                attr="learning_rule_type",
                obj=self,
            )

        return learning_rule

    @property
    def post_obj(self):
        return self.post.obj if isinstance(self.post, ObjView) else self.post

    @property
    def post_slice(self):
        return self.post.slice if isinstance(self.post,
                                             ObjView) else slice(None)

    @property
    def pre_obj(self):
        return self.pre.obj if isinstance(self.pre, ObjView) else self.pre

    @property
    def pre_slice(self):
        return self.pre.slice if isinstance(self.pre, ObjView) else slice(None)

    @property
    def size_in(self):
        """(int) The number of output dimensions of the pre object.

        Also the input size of the function, if one is specified.
        """
        return self.pre.size_out

    @property
    def size_mid(self):
        """(int) The number of output dimensions of the function, if specified.

        If the function is not specified, then ``size_in == size_mid``.
        """
        size = self.function_info.size
        return self.size_in if size is None else size

    @property
    def size_out(self):
        """(int) The number of input dimensions of the post object.

        Also the number of output dimensions of the transform.
        """
        return self.post.size_in
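
A minimal usage sketch for the ``Connection`` class above (assuming a standard
``nengo`` install; the object names and the squaring function are illustrative,
not part of the snippet). It exercises the slicing behavior from the docstring
and shows how ``size_in``, ``size_mid``, and ``size_out`` relate when a
function and a transform are combined:

import numpy as np
import nengo

with nengo.Network():
    node = nengo.Node(np.zeros(2))    # size_out == 2
    ensemble = nengo.Ensemble(10, 1)  # size_in == 1

    # Slicing lets the 2-D source drive the 1-D target one dimension at a time.
    nengo.Connection(node[0], ensemble)
    nengo.Connection(node[1], ensemble)

    # The function is applied before the transform, so the transform maps
    # size_mid -> size_out: here a (2, 1) matrix.
    two_d = nengo.Ensemble(20, 2)
    conn = nengo.Connection(
        ensemble, two_d,
        function=lambda x: x ** 2,
        transform=np.ones((2, 1)),
    )

assert (conn.size_in, conn.size_mid, conn.size_out) == (1, 1, 2)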
Example #29
class Connection(NengoObject):
    """Connects two objects together.

    Almost any Nengo object can act as the pre or post side of a connection.
    Additionally, you can use Python slice syntax to access only some of the
    dimensions of the pre or post object.

    For example, if ``node`` has ``size_out=2`` and ``ensemble`` has
    ``size_in=1``, we could not create the following connection::

        nengo.Connection(node, ensemble)

    But, we could create either of these two connections::

        nengo.Connection(node[0], ensemble)
        nengo.Connection(node[1], ensemble)

    Parameters
    ----------
    pre : Ensemble or Neurons or Node
        The source Nengo object for the connection.
    post : Ensemble or Neurons or Node or Probe
        The destination object for the connection.

    label : string
        A descriptive label for the connection.
    dimensions : int
        The number of output dimensions of the pre object, including
        `function`, but not including `transform`.
    synapse : float, optional
        Post-synaptic time constant (PSTC) to use for filtering.
    transform : (post_size, pre_size) array_like, optional
        Linear transform mapping the pre output to the post input.
        This transform is in terms of the sliced size; if either pre
        or post is a slice, the transform must be of shape
        (len(post_slice), len(pre_slice)).
    solver : Solver
        Instance of a Solver class to compute decoders or weights
        (see `nengo.solvers`). If solver.weights is True, a full
        connection weight matrix is computed instead of decoders.
    function : callable, optional
        Function to compute using the pre population (pre must be Ensemble).
    modulatory : bool, optional
        Specifies whether the connection is modulatory (does not physically
        connect to post, for use by learning rules), or not (default).
    eval_points : (n_eval_points, pre_size) array_like or int, optional
        Points at which to evaluate `function` when computing decoders,
        spanning the interval (-pre.radius, pre.radius) in each dimension.
    scale_eval_points : bool
        Indicates whether the eval_points should be scaled by the radius of
        the pre Ensemble. Defaults to True.
    learning_rule_type : instance or list or dict of LearningRuleType, optional
        Methods of modifying the connection weights during simulation.

    Attributes
    ----------
    dimensions : int
        The number of output dimensions of the pre object, including
        `function`, but before applying the `transform`.
    function : callable
        The given function.
    function_size : int
        The output dimensionality of the given function. Defaults to 0.
    label : str
        A human-readable connection label for debugging and visualization.
        Incorporates the labels of the pre and post objects.
    learning_rule : LearningRule or collection of LearningRule
        The LearningRule objects corresponding to `learning_rule_type`, and in
        the same format. Use these to probe the learning rules.
    learning_rule_type : instance or list or dict of LearningRuleType, optional
        The learning rule types.
    post : Ensemble or Neurons or Node or Probe
        The given post object.
    pre : Ensemble or Neurons or Node
        The given pre object.
    transform : (post_size, pre_size) array_like
        Linear transform mapping the pre output to the post input.
    modulatory : bool
        Whether the output of this signal is to act as an error signal for a
        learning rule.
    seed : int
        The seed used for random number generation.
    """

    pre = NengoObjectParam(nonzero_size_out=True)
    post = NengoObjectParam(nonzero_size_in=True)
    synapse = SynapseParam(default=Lowpass(0.005))
    transform = TransformParam(default=np.array(1.0))
    solver = ConnectionSolverParam(default=LstsqL2())
    function_info = ConnectionFunctionParam(default=None, optional=True)
    modulatory = BoolParam(default=False)
    learning_rule_type = ConnectionLearningRuleTypeParam(default=None,
                                                         optional=True)
    eval_points = EvalPointsParam(default=None,
                                  optional=True,
                                  sample_shape=('*', 'size_in'))
    scale_eval_points = BoolParam(default=True)
    seed = IntParam(default=None, optional=True)
    probeable = ListParam(default=['output', 'input', 'transform', 'decoders'])

    def __init__(self,
                 pre,
                 post,
                 synapse=Default,
                 transform=Default,
                 solver=Default,
                 learning_rule_type=Default,
                 function=Default,
                 modulatory=Default,
                 eval_points=Default,
                 scale_eval_points=Default,
                 seed=Default):
        self.pre = pre
        self.post = post

        self.probeable = Default
        self.solver = solver  # Must be set before learning rule
        self.learning_rule_type = learning_rule_type
        self.modulatory = modulatory
        self.synapse = synapse
        self.transform = transform
        self.scale_eval_points = scale_eval_points
        self.eval_points = eval_points  # Must be set before function
        self.function_info = function  # Must be set after transform
        self.seed = seed
        self._learning_rule = None  # cache used by the learning_rule property

    @property
    def function(self):
        return self.function_info.function

    @function.setter
    def function(self, function):
        self.function_info = function

    @property
    def pre_obj(self):
        return self.pre.obj if isinstance(self.pre, ObjView) else self.pre

    @property
    def pre_slice(self):
        return self.pre.slice if isinstance(self.pre, ObjView) else slice(None)

    @property
    def post_obj(self):
        return self.post.obj if isinstance(self.post, ObjView) else self.post

    @property
    def post_slice(self):
        return (self.post.slice
                if isinstance(self.post, ObjView) else slice(None))

    @property
    def size_in(self):
        """Output size of sliced `pre`; input size of the function."""
        return self.pre.size_out

    @property
    def size_mid(self):
        """Output size of the function; input size of the transform.

        If the function is None, then `size_in == size_mid`.
        """
        size = self.function_info.size
        return self.size_in if size is None else size

    @property
    def size_out(self):
        """Output size of the transform; input size to the sliced post."""
        return self.post.size_in

    @property
    def _label(self):
        return "from %s to %s%s" % (self.pre, self.post,
                                    " computing '%s'" % self.function.__name__
                                    if self.function is not None else "")

    def __str__(self):
        return "<Connection %s>" % self._label

    def __repr__(self):
        return "<Connection at 0x%x %s>" % (id(self), self._label)

    @property
    def learning_rule(self):
        if self.learning_rule_type is not None and self._learning_rule is None:
            types = self.learning_rule_type
            if isinstance(types, dict):
                self._learning_rule = types.__class__()  # dict of same type
                for k, v in iteritems(types):
                    self._learning_rule[k] = LearningRule(self, v)
            elif is_iterable(types):
                self._learning_rule = [LearningRule(self, v) for v in types]
            elif isinstance(types, LearningRuleType):
                self._learning_rule = LearningRule(self, types)
            else:
                raise ValueError("Invalid type for `learning_rule_type`: %s" %
                                 (types.__class__.__name__))
        return self._learning_rule
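
The cached ``learning_rule`` property above mirrors the container format of
``learning_rule_type``: a dict yields a dict of the same type, any other
iterable yields a list, and a single rule yields a single ``LearningRule``. A
brief sketch (written against the current ``nengo`` API, which behaves the same
way; the ensembles and the rule choices are illustrative):

import numpy as np
import nengo

with nengo.Network():
    a = nengo.Ensemble(30, 1)
    b = nengo.Ensemble(30, 1)

    # A single rule gives a single LearningRule object ...
    conn = nengo.Connection(a, b, learning_rule_type=nengo.PES())
    print(type(conn.learning_rule).__name__)  # LearningRule

    # ... while a list of rules gives a list, in the same order. Weight-based
    # rules such as BCM and Oja need an actual weight matrix, so connect
    # neurons to neurons here.
    conn2 = nengo.Connection(
        a.neurons, b.neurons,
        transform=np.zeros((30, 30)),
        learning_rule_type=[nengo.BCM(), nengo.Oja()],
    )
    print(len(conn2.learning_rule))  # 2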
Example #30
class Connection(NengoObject):
    """Connects two objects together.

    The connection between the two object is unidirectional,
    transmitting information from the first argument, ``pre``,
    to the second argument, ``post``.

    Almost any Nengo object can act as the pre or post side of a connection.
    Additionally, you can use Python slice syntax to access only some of the
    dimensions of the pre or post object.

    For example, if ``node`` has ``size_out=2`` and ``ensemble`` has
    ``size_in=1``:

    .. testcode::

       with nengo.Network() as net:
           node = nengo.Node(np.zeros(2))
           ensemble = nengo.Ensemble(10, 1)

    We could not create the following connection:

    .. testcode::

       with net:
           nengo.Connection(node, ensemble)

    .. testoutput::
       :hide:

       Traceback (most recent call last):
       ...
       nengo.exceptions.ValidationError: init: Shape of initial value () does not \
       match expected shape (1, 2)

    But, we could create either of these two connections:

    .. testcode::

       with net:
           nengo.Connection(node[0], ensemble)
           nengo.Connection(node[1], ensemble)

    Parameters
    ----------
    pre : Ensemble or Neurons or Node
        The source Nengo object for the connection.
    post : Ensemble or Neurons or Node or LearningRule
        The destination object for the connection.
    synapse : Synapse or None, optional
        Synapse model to use for filtering (see `~nengo.synapses.Synapse`).
        If *None*, no synapse will be used and information will be transmitted
        without any delay (if supported by the backend---some backends may
        introduce a single time step delay).

        Note that at least one connection must have a synapse that is not
        *None* if components are connected in a cycle. Furthermore, a synaptic
        filter with a zero time constant is different from a *None* synapse
        as a synaptic filter will always add a delay of at least one time step.
    function : callable or (n_eval_points, size_mid) array_like, optional
        Function to compute across the connection. Note that ``pre`` must be
        an ensemble to apply a function across the connection.
        If an array is passed, the function is implicitly defined by the
        points in the array and the provided ``eval_points``, which have a
        one-to-one correspondence.
    transform : (size_out, size_mid) array_like, optional
        Linear transform mapping the pre output to the post input.
        This transform is in terms of the sliced size; if either pre
        or post is a slice, the transform must be shaped according to
        the sliced dimensionality. Additionally, the function is applied
        before the transform, so if a function is computed across the
        connection, the transform must be of shape ``(size_out, size_mid)``.
    solver : Solver, optional
        Solver instance to compute decoders or weights
        (see `~nengo.solvers.Solver`). If ``solver.weights`` is True, a full
        connection weight matrix is computed instead of decoders.
    learning_rule_type : LearningRuleType or iterable of LearningRuleType, optional
        Modifies the decoders or connection weights during simulation.
    eval_points : (n_eval_points, size_in) array_like or int, optional
        Points at which to evaluate ``function`` when computing decoders,
        spanning the interval (-pre.radius, pre.radius) in each dimension.
        If None, will use the eval_points associated with ``pre``.
    scale_eval_points : bool, optional
        Indicates whether the evaluation points should be scaled
        by the radius of the pre Ensemble.
    label : str, optional
        A descriptive label for the connection.
    seed : int, optional
        The seed used for random number generation.

    Attributes
    ----------
    function : callable
        The given function.
    function_size : int
        The output dimensionality of the given function. If no function is
        specified, function_size will be 0.
    label : str
        A human-readable connection label for debugging and visualization.
        If not overridden, incorporates the labels of the pre and post objects.
    learning_rule_type : instance or list or dict of LearningRuleType, optional
        The learning rule types.
    post : Ensemble or Neurons or Node or Probe or ObjView
        The given post object.
    post_obj : Ensemble or Neurons or Node or Probe
        The underlying post object, even if ``post`` is an ``ObjView``.
    post_slice : slice or list or None
        The slice associated with ``post`` if it is an ObjView, or None.
    pre : Ensemble or Neurons or Node or ObjView
        The given pre object.
    pre_obj : Ensemble or Neurons or Node
        The underlying pre object, even if ``pre`` is an ``ObjView``.
    pre_slice : slice or list or None
        The slice associated with ``pre`` if it is an ObjView, or None.
    seed : int
        The seed used for random number generation.
    solver : Solver
        The Solver instance that will be used to compute decoders or weights
        (see ``nengo.solvers``).
    synapse : Synapse
        The Synapse model used for filtering across the connection
        (see ``nengo.synapses``).
    transform : (size_out, size_mid) array_like
        Linear transform mapping the pre function output to the post input.

    Properties
    ----------
    learning_rule : LearningRule or iterable of LearningRule
        Connectable learning rule object(s) associated with this connection.
    size_in : int
        The number of output dimensions of the pre object.
        Also the input size of the function, if one is specified.
    size_mid : int
        The number of output dimensions of the function, if specified.
        If the function is not specified, then ``size_in == size_mid``.
    size_out : int
        The number of input dimensions of the post object.
        Also the number of output dimensions of the transform.
    """

    probeable = ("output", "input", "weights")

    pre = PrePostParam("pre", nonzero_size_out=True)
    post = PrePostParam("post", nonzero_size_in=True)
    synapse = SynapseParam("synapse", default=Lowpass(tau=0.005))
    function_info = ConnectionFunctionParam("function",
                                            default=None,
                                            optional=True)
    transform = ConnectionTransformParam("transform",
                                         default=None,
                                         optional=True)
    solver = ConnectionSolverParam("solver", default=LstsqL2())
    learning_rule_type = ConnectionLearningRuleTypeParam("learning_rule_type",
                                                         default=None,
                                                         optional=True)
    eval_points = EvalPointsParam("eval_points",
                                  default=None,
                                  optional=True,
                                  sample_shape=("*", "size_in"))
    scale_eval_points = BoolParam("scale_eval_points", default=True)

    _param_init_order = [
        "pre",
        "post",
        "synapse",
        "eval_points",
        "function_info",
        "transform",
        "solver",
        "learning_rule_type",
    ]

    def __init__(
        self,
        pre,
        post,
        synapse=Default,
        function=Default,
        transform=Default,
        solver=Default,
        learning_rule_type=Default,
        eval_points=Default,
        scale_eval_points=Default,
        label=Default,
        seed=Default,
    ):
        super().__init__(label=label, seed=seed)

        self.pre = pre
        self.post = post

        self.synapse = synapse
        self.eval_points = eval_points  # Must be set before function
        self.scale_eval_points = scale_eval_points
        self.function_info = function
        self.transform = transform  # Must be set after function
        self.solver = solver  # Must be set before learning rule
        self.learning_rule_type = learning_rule_type  # set after transform

    def __str__(self):
        return self._str(include_id=False)

    def __repr__(self):
        return self._str(include_id=True)

    def _str(self, include_id):
        desc = "<Connection "
        if include_id:
            desc += f"at 0x{id(self):x} "

        if self.label is None:
            func_txt = ("" if self.function is None else
                        f" computing '{function_name(self.function)}'")
            desc += f"from {self.pre} to {self.post}{func_txt}"
        else:
            desc += self.label

        desc += ">"

        return desc

    @property
    def function(self):
        return self.function_info.function

    @function.setter
    def function(self, function):
        self.function_info = function

    @property
    def has_weights(self):
        return not isinstance(self.transform, NoTransform) or (
            isinstance(self.pre_obj, Ensemble)
            and not isinstance(self.pre_obj.neuron_type, Direct))

    @property
    def is_decoded(self):
        warnings.warn(
            "is_decoded is deprecated; directly check the pre/post objects for the "
            "properties of interest instead",
            DeprecationWarning,
        )
        return not (self.solver.weights or
                    (isinstance(self.pre_obj, Neurons)
                     and isinstance(self.post_obj, Neurons)))

    @property
    def _to_neurons(self):
        return isinstance(self.post_obj, Neurons) or (isinstance(
            self.pre_obj, Ensemble) and isinstance(self.post_obj, Ensemble)
                                                      and self.solver.weights)

    @property
    def learning_rule(self):
        """(LearningRule or iterable) Connectable learning rule object(s)."""
        if self.learning_rule_type is None:
            return None

        types = self.learning_rule_type
        if isinstance(types, dict):
            learning_rule = type(types)()  # dict of same type
            for k, v in types.items():
                learning_rule[k] = LearningRule(self, v)
        elif is_iterable(types):
            learning_rule = [LearningRule(self, v) for v in types]
        elif isinstance(types, LearningRuleType):
            learning_rule = LearningRule(self, types)
        else:
            assert False, "Validation should catch this"

        return learning_rule

    @property
    def post_obj(self):
        return self.post.obj if isinstance(self.post, ObjView) else self.post

    @property
    def post_slice(self):
        return self.post.slice if isinstance(self.post,
                                             ObjView) else slice(None)

    @property
    def pre_obj(self):
        return self.pre.obj if isinstance(self.pre, ObjView) else self.pre

    @property
    def pre_slice(self):
        return self.pre.slice if isinstance(self.pre, ObjView) else slice(None)

    @property
    def size_in(self):
        """(int) The number of output dimensions of the pre object.

        Also the input size of the function, if one is specified.
        """
        return self.pre.size_out

    @property
    def size_mid(self):
        """(int) The number of output dimensions of the function, if specified.

        If the function is not specified, then ``size_in == size_mid``.
        """
        size = self.function_info.size
        return self.size_in if size is None else size

    @property
    def size_out(self):
        """(int) The number of input dimensions of the post object.

        Also the number of output dimensions of the transform.
        """
        return self.post.size_in
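
Because ``probeable`` includes ``"weights"``, the decoders or weights that a
learning rule modifies can be recorded during simulation. A minimal sketch,
assuming a standard ``nengo`` install (the PES rule, the sine input, and the
error wiring are illustrative, not part of the snippet above):

import numpy as np
import nengo

with nengo.Network() as model:
    stim = nengo.Node(np.sin)
    pre = nengo.Ensemble(50, 1)
    post = nengo.Ensemble(50, 1)
    nengo.Connection(stim, pre)

    conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES())

    # Drive the PES rule with the decoding error (post - stim).
    error = nengo.Node(size_in=1)
    nengo.Connection(post, error)
    nengo.Connection(stim, error, transform=-1)
    nengo.Connection(error, conn.learning_rule)

    # Record the decoders as they are learned.
    w_probe = nengo.Probe(conn, "weights", sample_every=0.01)

with nengo.Simulator(model) as sim:
    sim.run(0.5)

print(sim.data[w_probe].shape)  # (n_samples, post.size_in, pre.n_neurons)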