Code example #1
def discrete_example(seed, dt):
    n_neurons = 1000
    theta = 0.1
    freq = 50
    q = 27
    radii = 1.0
    sys = PadeDelay(theta, q)

    T = 5000*(dt+0.001)
    rms = 1.0
    signal = WhiteSignal(T, high=freq, rms=rms, y0=0)

    tau = 0.1
    tau_probe = 0.02
    reg = 0.1

    # Determine radii using direct mode
    with LinearNetwork(
            sys, n_neurons_per_ensemble=1, input_synapse=tau, synapse=tau,
            dt=dt, neuron_type=Direct(),
            realizer=Balanced()) as model:
        Connection(Node(output=signal), model.input, synapse=None)
        p_x = Probe(model.state.input, synapse=None)

    with Simulator(model, dt=dt, seed=seed+1) as sim:
        sim.run(T)

    radii *= np.max(abs(sim.data[p_x]), axis=0)
    logging.info("Radii: %s", radii)

    with Network(seed=seed) as model:
        u = Node(output=signal)

        kwargs = dict(
            n_neurons_per_ensemble=n_neurons // len(sys),
            input_synapse=tau, synapse=tau, radii=radii,
            solver=LstsqL2(reg=reg), realizer=Balanced())
        delay_disc = LinearNetwork(sys, dt=dt, **kwargs)
        delay_cont = LinearNetwork(sys, dt=None, **kwargs)
        Connection(u, delay_disc.input, synapse=None)
        Connection(u, delay_cont.input, synapse=None)

        p_u = Probe(u, synapse=tau_probe)
        p_y_disc = Probe(delay_disc.output, synapse=tau_probe)
        p_y_cont = Probe(delay_cont.output, synapse=tau_probe)

    with Simulator(model, dt=dt, seed=seed) as sim:
        sim.run(T)

    return (theta, dt, sim.trange(), sim.data[p_u],
            sim.data[p_y_disc], sim.data[p_y_cont])
Code example #2
    def generate_conns(self):
        """Generate the set of direct Connections replacing this Cluster."""
        outputs = {}
        for c in self.conns_in | self.conns_mid | self.conns_out:
            pre = c.pre_obj
            if pre not in outputs:
                outputs[pre] = {c}
            else:
                outputs[pre].add(c)

        for c in self.conns_in:
            assert c.post_obj in self.objs
            for k, (pre_slice, transform, synapse, post) in enumerate(
                self.generate_from(c.post_obj, outputs)
            ):
                syn = self.merge_synapses(c.synapse, synapse)
                trans = self.merge_transforms(
                    c.post_obj,
                    [c.size_mid, post.size_in],
                    [c.transform, transform],
                    [c.post_slice, pre_slice],
                )

                if not np.allclose(trans.init, 0):
                    yield Connection(
                        pre=c.pre,
                        post=post,
                        function=c.function,
                        eval_points=c.eval_points,
                        scale_eval_points=c.scale_eval_points,
                        synapse=syn,
                        transform=trans,
                        add_to_container=False,
                        label=(None if c.label is None else "%s_%d" % (c.label, k)),
                    )
Code example #3
File: splitter.py  Project: colinshane/nengo-loihi
def split_host_to_learning_rule(networks, conn):
    dim = conn.size_out
    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    networks.add(send, "host")

    pre2send = Connection(
        conn.pre,
        send,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=conn.transform,
        label=conn.label,
        add_to_container=False,
    )
    networks.add(pre2send, "host")
    pes_target = networks.needs_sender[conn.post_obj]
    networks.host2chip_senders[send] = pes_target
    networks.remove(conn)
Code example #4
def build_host_to_learning_rule(model, conn):
    if not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        # TODO: What needs to be done to support this? It looks like it should just work
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on host to chip "
            "learning rule connections"
        )

    dim = conn.size_out
    host = model.host_model(base_obj(conn.pre))

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    host.build(send)

    pre2send = Connection(
        conn.pre,
        send,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=conn.transform,
        label=conn.label,
        add_to_container=False,
    )
    model.host2chip_pes_senders[send] = model.needs_sender[conn.post_obj]
    _inherit_seed(host, pre2send, model, conn)
    host.build(pre2send)
Code example #5
File: connection.py  Project: trigrass2/nengo-loihi
def build_host_to_learning_rule(model, conn):
    if (nengo_transforms is not None
            and not isinstance(conn.transform, nengo_transforms.Dense)):
        raise BuildError("nengo-loihi does not yet support %r transforms "
                         "on host to chip learning rule connections" %
                         (type(conn.transform).__name__, ))

    dim = conn.size_out
    host = model.host_model(base_obj(conn.pre))

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    host.build(send)

    pre2send = Connection(
        conn.pre,
        send,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=conn.transform,
        label=conn.label,
        add_to_container=False,
    )
    pes_target = model.needs_sender[conn.post_obj]
    model.host2chip_senders[send] = pes_target
    _inherit_seed(host, pre2send, model, conn)
    host.build(pre2send)
Code example #6
def build_host_neurons_to_chip(model, conn):
    """Send spikes over and do the rest of the connection on-chip"""

    assert not isinstance(conn.post, LearningRule)
    dim = conn.size_in
    host = model.host_model(base_obj(conn.pre))

    logger.debug("Creating ChipReceiveNeurons for %s", conn)
    receive = ChipReceiveNeurons(
        dim,
        neuron_type=conn.pre_obj.ensemble.neuron_type,
        label=None if conn.label is None else "%s_neurons" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(model, receive, model, conn)
    model.builder.build(model, receive)

    receive2post = Connection(
        receive,
        conn.post,
        transform=conn.transform,
        synapse=conn.synapse,
        label=None if conn.label is None else "%s_chip" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(model, receive2post, model, conn)
    _inherit_config(model, receive2post, model, conn)
    build_chip_connection(model, receive2post)

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    host.build(send)

    pre2send = Connection(
        conn.pre,
        send,
        synapse=None,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    model.host2chip_senders[send] = receive
    _inherit_seed(host, pre2send, model, conn)
    host.build(pre2send)
Code example #7
def time_cells(order):
    seed = 0
    n_neurons = 300
    theta = 4.784
    tau = 0.1
    radius = 0.3
    realizer = Balanced


    # The following was patched from nengolib commit
    # 7e204e0c305e34a4f63d0a6fbba7197862bbcf22, prior to
    # aee92b8fc45749f07f663fe696745cf0a33bfa17, so that
    # the generated PDF is consistent with the version that the
    # overlay was added to.
    def PadeDelay(c, q):
        j = np.arange(1, q+1, dtype=np.float64)
        u = (q + j - 1) * (q - j + 1) / (c * j)

        A = np.zeros((q, q))
        B = np.zeros((q, 1))
        C = np.zeros((1, q))
        D = np.zeros((1,))

        A[0, :] = B[0, 0] = -u[0]
        A[1:, :-1][np.diag_indices(q-1)] = u[1:]
        C[0, :] = - j / float(q) * (-1) ** (q - j)
        return LinearSystem((A, B, C, D), analog=True)

    F = PadeDelay(theta, order)
    synapse = Alpha(tau)

    pulse_s = 0
    pulse_w = 1.0
    pulse_h = 1.5

    T = 6.0
    dt = 0.001
    pulse = np.zeros(int(T/dt))
    pulse[int(pulse_s/dt):int((pulse_s + pulse_w)/dt)] = pulse_h

    with Network(seed=seed) as model:
        u = Node(output=PresentInput(pulse, dt))

        delay = LinearNetwork(
            F, n_neurons_per_ensemble=n_neurons // len(F), synapse=synapse,
            input_synapse=None, radii=radius, dt=dt, realizer=realizer())
        Connection(u, delay.input, synapse=None)

        p_x = Probe(delay.state.input, synapse=None)
        p_a = Probe(delay.state.add_neuron_output(), synapse=None)

    with Simulator(model, dt=dt) as sim:
        sim.run(T)

    return sim.trange(), sim.data[p_x], sim.data[p_a]
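Note on example #7: the inline PadeDelay builds a state-space realization of the [q-1/q] Padé approximant of a pure delay of c seconds. Schematically, using the standard Padé formula for the exponential with p = q - 1 (a hedged reference, not code from the project):

    e^{-cs} \approx \frac{P(cs)}{Q(cs)}, \qquad
    P(x) = \sum_{k=0}^{p} \frac{(p+q-k)!\, p!}{(p+q)!\, k!\, (p-k)!} (-x)^k, \qquad
    Q(x) = \sum_{k=0}^{q} \frac{(p+q-k)!\, q!}{(p+q)!\, k!\, (q-k)!} x^k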
Code example #8
File: splitter.py  Project: colinshane/nengo-loihi
def split_host_neurons_to_chip(networks, conn):
    """Send spikes over and do the rest of the connection on-chip"""

    assert not isinstance(conn.post, LearningRule)
    dim = conn.size_in

    logger.debug("Creating ChipReceiveNeurons for %s", conn)
    receive = ChipReceiveNeurons(
        dim,
        neuron_type=conn.pre_obj.ensemble.neuron_type,
        label=None if conn.label is None else "%s_neurons" % conn.label,
        add_to_container=False,
    )
    networks.add(receive, "chip")
    receive2post = Connection(
        receive,
        conn.post,
        transform=conn.transform,
        synapse=conn.synapse,
        label=None if conn.label is None else "%s_chip" % conn.label,
        add_to_container=False,
    )
    networks.add(receive2post, "chip")

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    networks.add(send, "host")
    pre2send = Connection(
        conn.pre,
        send,
        synapse=None,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    networks.add(pre2send, "host")

    networks.host2chip_senders[send] = receive
    networks.remove(conn)
Code example #9
def build_chip_to_host(model, conn):
    if not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on chip to host connections"
        )

    rng = np.random.RandomState(model.seeds[conn])
    dim = conn.size_out
    host = model.host_model(base_obj(conn.post))

    logger.debug("Creating HostReceiveNode for %s", conn)
    receive = HostReceiveNode(
        dim,
        label=None if conn.label is None else "%s_receive" % conn.label,
        add_to_container=False,
    )
    host.build(receive)

    receive2post = Connection(
        receive,
        conn.post,
        synapse=conn.synapse,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, receive2post, model, conn)
    host.build(receive2post)

    logger.debug("Creating Probe for %s", conn)
    transform = sample_transform(conn, rng=rng)

    probe = NengoProbe(
        conn.pre, synapse=None, solver=conn.solver, add_to_container=False
    )
    model.chip2host_params[probe] = dict(
        learning_rule_type=conn.learning_rule_type,
        function=conn.function,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        transform=transform,
        label=None if conn.label is None else "%s_probe" % conn.label,
    )
    model.chip2host_receivers[probe] = receive
    _inherit_seed(model, probe, model, conn)
    model.builder.build(model, probe)

    if conn.learning_rule_type is not None:
        if not isinstance(conn.pre_obj, Ensemble):
            raise NotImplementedError(
                "Learning rule presynaptic object must be an Ensemble "
                "(got %r)" % type(conn.pre_obj).__name__
            )
        model.needs_sender[conn.learning_rule] = PESModulatoryTarget(probe)
Code example #10
def delayed_synapse():
    a = 0.1  # desired delay
    b = 0.01  # synapse delay
    tau = 0.01  # recurrent tau
    hz = 15  # input frequency
    t = 1.0  # simulation time
    dt = 0.00001  # simulation timestep
    order = 6  # order of pade approximation
    tau_probe = 0.02

    dexp_synapse = DoubleExp(tau, tau / 5)

    sys_lambert = lambert_delay(a, b, tau, order - 1, order)
    synapse = (cont2discrete(Lowpass(tau), dt=dt) *
               DiscreteDelay(int(b / dt)))

    n_neurons = 2000
    neuron_type = PerfectLIF()

    A, B, C, D = sys_lambert.observable.transform(5*np.eye(order)).ss

    sys_normal = PadeDelay(a, order)
    assert len(sys_normal) == order

    with Network(seed=0) as model:
        stim = Node(output=WhiteSignal(t, high=hz, y0=0))

        x = EnsembleArray(n_neurons // order, len(A), neuron_type=neuron_type)
        output = Node(size_in=1)

        Connection(x.output, x.input, transform=A, synapse=synapse)
        Connection(stim, x.input, transform=B, synapse=synapse)
        Connection(x.output, output, transform=C, synapse=None)
        Connection(stim, output, transform=D, synapse=None)

        lowpass_delay = LinearNetwork(
            sys_normal, n_neurons_per_ensemble=n_neurons // order,
            synapse=tau, input_synapse=tau,
            dt=None, neuron_type=neuron_type, radii=1.0)
        Connection(stim, lowpass_delay.input, synapse=None)

        dexp_delay = LinearNetwork(
            sys_normal, n_neurons_per_ensemble=n_neurons // order,
            synapse=dexp_synapse, input_synapse=dexp_synapse,
            dt=None, neuron_type=neuron_type, radii=1.0)
        Connection(stim, dexp_delay.input, synapse=None)

        p_stim = Probe(stim, synapse=tau_probe)
        p_output_delayed = Probe(output, synapse=tau_probe)
        p_output_lowpass = Probe(lowpass_delay.output, synapse=tau_probe)
        p_output_dexp = Probe(dexp_delay.output, synapse=tau_probe)

    with Simulator(model, dt=dt, seed=0) as sim:
        sim.run(t)

    return (a, dt, sim.trange(), sim.data[p_stim],
            sim.data[p_output_delayed], sim.data[p_output_lowpass],
            sim.data[p_output_dexp])
Code example #11
File: tensor_node.py  Project: rsantana-isg/nengo_dl
def tensor_layer(input, layer_func, shape_in=None, synapse=None,
                 transform=1, **layer_args):
    """A utility function to construct TensorNodes that apply some function
    to their input (analogous to the ``tf.layers`` syntax).

    Parameters
    ----------
    input : :class:`~nengo:nengo.base.NengoObject`
        object providing input to the layer
    layer_func : callable or :class:`~nengo:nengo.neurons.NeuronType`
        a function that takes the value from ``input`` (represented as a
        ``tf.Tensor``) and maps it to some output value, or a Nengo neuron
        type, defining a nonlinearity that will be applied to ``input``
    shape_in : tuple of int, optional
        if not None, reshape the input to the given shape
    synapse : float or :class:`~nengo:nengo.synapses.Synapse`, optional
        synapse to apply on connection from ``input`` to this layer
    transform : :class:`~numpy:numpy.ndarray`, optional
        transform matrix to apply on connection from ``input`` to this layer
    layer_args : dict, optional
        these arguments will be passed to ``layer_func`` if it is callable, or
        :class:`~nengo:nengo.Ensemble` if ``layer_func`` is a
        :class:`~nengo:nengo.neurons.NeuronType`

    Returns
    -------
    :class:`.TensorNode` or :class:`~nengo:nengo.ensemble.Neurons`
        a TensorNode that implements the given layer function (if
        ``layer_func`` was a callable), or a Neuron object with the given
        neuron type, connected to ``input``
    """

    if isinstance(layer_func, NeuronType):
        node = Ensemble(input.size_out, 1, neuron_type=layer_func,
                        **layer_args).neurons
    else:
        # add (ignored) time input and pass kwargs
        def node_func(_, x):
            return layer_func(x, **layer_args)

        # reshape input if necessary
        if shape_in is not None:
            node_func = reshaped(shape_in)(node_func)

        node = TensorNode(node_func, size_in=input.size_out)

    Connection(input, node, synapse=synapse, transform=transform)

    return node
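A minimal usage sketch for tensor_layer, assuming an older nengo-dl (pre-3.0) running on TensorFlow 1.x where tf.layers.dense is available; the layer sizes are illustrative.

import nengo
import nengo_dl
import tensorflow as tf

with nengo.Network() as net:
    inp = nengo.Node([0.0] * 784)
    # apply a dense layer to the node output, then a LIF nonlinearity
    x = nengo_dl.tensor_layer(inp, tf.layers.dense, units=128)
    x = nengo_dl.tensor_layer(x, nengo.LIF())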
Code example #12
File: splitter.py  Project: colinshane/nengo-loihi
def split_chip_to_host(networks, conn):
    dim = conn.size_out

    logger.debug("Creating HostReceiveNode for %s", conn)
    receive = HostReceiveNode(
        dim,
        label=None if conn.label is None else "%s_receive" % conn.label,
        add_to_container=False,
    )
    networks.add(receive, "host")
    receive2post = Connection(
        receive,
        conn.post,
        synapse=conn.synapse,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    networks.add(receive2post, "host")

    logger.debug("Creating Probe for %s", conn)
    seed = networks.original.seed if conn.seed is None else conn.seed
    transform = sample_transform(conn, rng=np.random.RandomState(seed=seed))

    probe = Probe(conn.pre,
                  synapse=None,
                  solver=conn.solver,
                  add_to_container=False)
    networks.chip2host_params[probe] = dict(
        learning_rule_type=conn.learning_rule_type,
        function=conn.function,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        transform=transform,
        label=None if conn.label is None else "%s_probe" % conn.label,
    )
    networks.add(probe, "chip")
    networks.chip2host_receivers[probe] = receive

    if conn.learning_rule_type is not None:
        if not isinstance(conn.pre_obj, Ensemble):
            raise NotImplementedError(
                "Learning rule presynaptic object must be an Ensemble "
                "(got %r)" % type(conn.pre_obj).__name__)
        networks.needs_sender[conn.learning_rule] = PESModulatoryTarget(probe)
    networks.remove(conn)
Code example #13
File: builder.py  Project: xlong0513/nengo
def _create_replacement_connection(c_in, c_out):
    """Generate a new Connection to replace two through a passthrough Node."""
    # imported here to avoid circular imports
    from nengo import Connection  # pylint: disable=import-outside-toplevel

    assert c_in.post_obj is c_out.pre_obj
    assert c_in.post_obj.output is None

    # determine the filter for the new Connection
    if c_in.synapse is None:
        synapse = c_out.synapse
    elif c_out.synapse is None:
        synapse = c_in.synapse
    else:
        raise Unconvertible("Cannot merge two filters")
        # Note: the algorithm below is in the right ballpark,
        #  but isn't exactly the same as two low-pass filters
        # filter = c_out.filter + c_in.filter

    function = c_in.function
    if c_out.function is not None:
        raise Unconvertible("Cannot remove a connection with a function")

    # compute the combined transform
    transform = np.dot(full_transform(c_out), full_transform(c_in))

    # check if the transform is 0 (this happens a lot
    #  with things like identity transforms)
    if np.all(transform == 0):
        return None

    c = Connection(
        c_in.pre_obj,
        c_out.post_obj,
        synapse=synapse,
        transform=transform,
        function=function,
        add_to_container=False,
    )
    return c
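A toy sketch of the transform composition in _create_replacement_connection (illustrative matrices only): merging the pre->passthrough and passthrough->post transforms is a matrix product, and an all-zero product means no replacement connection is needed.

import numpy as np

T_in = np.array([[1.0, 0.0], [0.0, 2.0]])   # pre -> passthrough node
T_out = np.array([[0.5, 0.5]])              # passthrough node -> post
T_combined = np.dot(T_out, T_in)            # direct pre -> post transform
assert not np.all(T_combined == 0)          # nonzero, so keep the connection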
Code example #14
File: tensor_node.py  Project: Sreerag-ibtl/nengo-dl
    def __call__(
        self,
        input,
        transform=default_transform,
        shape_in=None,
        synapse=None,
        return_conn=False,
        **layer_args
    ):
        """
        Apply the TensorNode layer to the given input object.

        Parameters
        ----------
        input : ``NengoObject``
            Object providing input to the layer.
        transform : `~numpy.ndarray`
            Transform matrix to apply on connection from ``input`` to this layer.
        shape_in : tuple of int
            If not None, reshape the input to the given shape.
        synapse : float or `~nengo.synapses.Synapse`
            Synapse to apply on connection from ``input`` to this layer.
        return_conn : bool
            If True, also return the connection linking this layer to ``input``.
        layer_args : dict
            These arguments will be passed to `.TensorNode` if ``layer_func`` is a
            callable or Keras Layer, or `~nengo.Ensemble` if ``layer_func`` is a
            `~nengo.neurons.NeuronType`.

        Returns
        -------
        obj : `.TensorNode` or `~nengo.ensemble.Neurons`
            A TensorNode that implements the given layer function (if
            ``layer_func`` was a callable/Keras layer), or a Neuron object with the
            given neuron type, connected to ``input``.
        conn : `~nengo.Connection`
            If ``return_conn`` is True, also returns the connection object linking
            ``input`` and ``obj``.

        Notes
        -----
        The input connection created for the new TensorNode will be marked as
        non-trainable by default.
        """

        if shape_in is not None and all(x is not None for x in shape_in):
            size_in = np.prod(shape_in)
        elif isinstance(transform, np.ndarray) and transform.ndim == 2:
            size_in = transform.shape[0]
        else:
            size_in = input.size_out

        if isinstance(self.layer_func, NeuronType):
            obj = Ensemble(
                size_in, 1, neuron_type=self.layer_func, **layer_args
            ).neurons
        else:
            obj = TensorNode(
                self.layer_func,
                shape_in=(size_in,) if shape_in is None else shape_in,
                pass_time=False,
                **layer_args,
            )

        conn = Connection(input, obj, synapse=synapse, transform=transform)

        # set connection to non-trainable
        cfg = Config.context[0][conn]
        if not hasattr(cfg, "trainable"):
            configure_settings(trainable=None)
        cfg.trainable = False

        return (obj, conn) if return_conn else obj
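A minimal usage sketch for this __call__, assuming it is nengo_dl.Layer.__call__ from nengo-dl 3.x (shapes illustrative):

import nengo
import nengo_dl
import tensorflow as tf

with nengo.Network() as net:
    inp = nengo.Node([0.0] * 784)
    # wrap a Keras layer; calling the Layer object connects it to `inp`
    x = nengo_dl.Layer(tf.keras.layers.Dense(units=128))(inp)
    # a NeuronType can be wrapped and applied the same way
    x = nengo_dl.Layer(nengo.LIF())(x)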
Code example #15
from matplotlib.pyplot import plot, show
from nengo import Connection, Ensemble, Network, Node, Probe
from nengo_dl import Simulator
from numpy import sin

# define the model
with Network() as model:
    stim = Node(sin)
    a = Ensemble(100, 1)
    b = Ensemble(100, 1)
    Connection(stim, a)
    Connection(a, b, function=lambda x: x**2)

    probe_a = Probe(a, synapse=0.01)
    probe_b = Probe(b, synapse=0.01)

# build and run the model
with Simulator(model) as sim:
    sim.run(10)

# plot the results
plot(sim.trange(), sim.data[probe_a])
plot(sim.trange(), sim.data[probe_b])
show()
Code example #16
def conn_probe(model, nengo_probe):
    # Connection probes create a connection from the target, and probe
    # the resulting signal (used when you want to probe the default
    # output of an object, which may not have a predefined signal)

    synapse = 0  # Removed internal filtering

    # get any extra arguments if this probe was created to send data
    #  to an off-chip Node via the splitter

    conn_label = None if nengo_probe.label is None else "%s_conn" % nengo_probe.label
    kwargs = model.chip2host_params.get(nengo_probe, None)
    if kwargs is not None:
        # this probe is for sending data to a Node
        kwargs.setdefault("label", conn_label)

        # determine the dimensionality
        input_dim = nengo_probe.target.size_out
        func = kwargs["function"]
        if func is not None:
            if callable(func):
                input_dim = np.asarray(
                    func(np.zeros(input_dim, dtype=np.float64))).size
            else:
                input_dim = len(func[0])
        transform = np.asarray(kwargs["transform"], dtype=np.float64)
        if transform.ndim <= 1:
            output_dim = input_dim
        elif transform.ndim == 2:
            assert transform.shape[1] == input_dim
            output_dim = transform.shape[0]
        else:
            raise NotImplementedError()

        target = nengo.Node(size_in=output_dim, add_to_container=False)
        # TODO: This is a hack so that the builder can properly delegate the
        # connection build to the right method
        model.split.hostchip.chip_objs.add(target)

        conn = Connection(
            nengo_probe.target,
            target,
            synapse=synapse,
            solver=nengo_probe.solver,
            add_to_container=False,
            **kwargs,
        )
        model.nengo_probe_conns[nengo_probe] = conn
    else:
        conn = Connection(
            nengo_probe.target,
            nengo_probe,
            synapse=synapse,
            solver=nengo_probe.solver,
            add_to_container=False,
            label=conn_label,
        )
        target = nengo_probe

    # Set connection's seed to probe's (which isn't used elsewhere)
    model.seeded[conn] = model.seeded[nengo_probe]
    model.seeds[conn] = model.seeds[nengo_probe]

    d = conn.size_out
    if isinstance(nengo_probe.obj, Ensemble):
        # probed values are scaled by the target ensemble's radius
        scale = nengo_probe.obj.radius
        w = np.diag(scale * np.ones(d))
        weights = np.vstack([w, -w])
    else:
        raise NotImplementedError(
            "Nodes cannot be onchip, connections not yet probeable")

    # probe target will be set when we build the connection below
    probe = LoihiProbe(target=[None],
                       key="voltage",
                       weights=[weights],
                       synapse=nengo_probe.synapse)
    model.objs[target]["in"] = probe
    model.objs[target]["out"] = probe

    # add an extra entry for simulator.run_steps to read data out
    model.objs[nengo_probe]["out"] = probe

    # Build the connection (sets probe targets, adds probe)
    model.build(conn)
Code example #17
File: tensor_node.py  Project: kstandvoss/nengo-dl
def tensor_layer(input,
                 layer_func,
                 shape_in=None,
                 synapse=None,
                 transform=1,
                 return_conn=False,
                 **layer_args):
    """A utility function to construct TensorNodes that apply some function
    to their input (analogous to the ``tf.layers`` syntax).

    Parameters
    ----------
    input : ``NengoObject``
        Object providing input to the layer
    layer_func : callable or `~nengo.neurons.NeuronType`
        A function that takes the value from ``input`` (represented as a
        ``tf.Tensor``) and maps it to some output value, or a Nengo neuron
        type, defining a nonlinearity that will be applied to ``input``.
    shape_in : tuple of int
        If not None, reshape the input to the given shape
    synapse : float or `~nengo.synapses.Synapse`
        Synapse to apply on connection from ``input`` to this layer
    transform : `~numpy.ndarray`
        Transform matrix to apply on connection from ``input`` to this layer
    return_conn : bool
        If True, also return the connection linking this layer to ``input``
    layer_args : dict
        These arguments will be passed to ``layer_func`` if it is callable, or
        `~nengo.Ensemble` if ``layer_func`` is a `~nengo.neurons.NeuronType`

    Returns
    -------
    node : `.TensorNode` or `~nengo.ensemble.Neurons`
        A TensorNode that implements the given layer function (if
        ``layer_func`` was a callable), or a Neuron object with the given
        neuron type, connected to ``input``
    conn : `~nengo.Connection`
        If ``return_conn`` is True, also returns the connection object linking
        ``input`` and ``node``.
    """

    if isinstance(transform, np.ndarray) and transform.ndim == 2:
        size_in = transform.shape[0]
    elif shape_in is not None:
        size_in = np.prod(shape_in)
    else:
        size_in = input.size_out

    if isinstance(layer_func, NeuronType):
        node = Ensemble(size_in, 1, neuron_type=layer_func,
                        **layer_args).neurons
    else:
        # add (ignored) time input and pass kwargs
        def node_func(_, x):
            return layer_func(x, **layer_args)

        # reshape input if necessary
        if shape_in is not None:
            node_func = reshaped(shape_in)(node_func)

        node = TensorNode(node_func, size_in=size_in)

    conn = Connection(input, node, synapse=synapse, transform=transform)

    return (node, conn) if return_conn else node
Code example #18
File: splitter.py  Project: colinshane/nengo-loihi
def split_host_to_chip(networks, conn):
    dim = conn.size_out
    logger.debug("Creating ChipReceiveNode for %s", conn)
    receive = ChipReceiveNode(
        dim * 2,
        size_out=dim,
        label=None if conn.label is None else "%s_node" % conn.label,
        add_to_container=False,
    )
    networks.add(receive, "chip")
    receive2post = Connection(
        receive,
        conn.post,
        synapse=networks.node_tau,
        label=None if conn.label is None else "%s_chip" % conn.label,
        add_to_container=False,
    )
    networks.add(receive2post, "chip")

    logger.debug("Creating DecodeNeuron ensemble for %s", conn)
    if networks.node_neurons is None:
        raise BuildError(
            "DecodeNeurons must be specified for host->chip connection.")
    ens = networks.node_neurons.get_ensemble(dim)
    ens.label = None if conn.label is None else "%s_ens" % conn.label
    networks.add(ens, "host")

    if nengo_transforms is not None and isinstance(
            conn.transform, nengo_transforms.Convolution):
        raise BuildError(
            "Conv2D transforms not supported for off-chip to "
            "on-chip connections where `pre` is not a Neurons object.")

    # Scale the input spikes based on the radius of the target ensemble
    seed = networks.original.seed if conn.seed is None else conn.seed
    weights = sample_transform(conn, rng=np.random.RandomState(seed=seed))

    if isinstance(conn.post_obj, Ensemble):
        weights = weights / conn.post_obj.radius

    if nengo_transforms is None:
        transform = weights
    else:
        # copy the Transform information, setting `init` to the sampled weights
        transform = copy.copy(conn.transform)
        type(transform).init.data[transform] = weights

    pre2ens = Connection(
        conn.pre,
        ens,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=transform,
        label=None if conn.label is None else "%s_enc" % conn.label,
        add_to_container=False,
    )
    networks.add(pre2ens, "host")

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim * 2,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    networks.add(send, "host")
    ensneurons2send = Connection(
        ens.neurons,
        send,
        synapse=None,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    networks.add(ensneurons2send, "host")
    networks.remove(conn)

    networks.host2chip_senders[send] = receive
Code example #19
def build_host_to_chip(model, conn):
    rng = np.random.RandomState(model.seeds[conn])
    host = model.host_model(base_obj(conn.pre))

    if is_transform_type(conn.transform, ("Convolution", "ConvolutionTranspose")):
        raise BuildError(
            f"{conn}: Conv2D transforms not supported for off-chip to "
            "on-chip connections where `pre` is not a Neurons object."
        )
    elif not is_transform_type(conn.transform, ("Dense", "NoTransform")):
        raise BuildError(
            f"{conn}: nengo-loihi does not yet support "
            f"'{type(conn.transform).__name__}' transforms on host to chip connections"
        )

    # Scale the input spikes based on the radius of the target ensemble
    weights = sample_transform(conn, rng=rng)

    if isinstance(conn.post_obj, Ensemble):
        weights = weights / conn.post_obj.radius

    if is_transform_type(conn.transform, "NoTransform"):
        transform = weights  # weights are 1 / (post ensemble radius), if applicable
    else:
        # copy the Transform information, setting `init` to the sampled weights
        transform = copy.copy(conn.transform)
        type(transform).init.data[transform] = weights

    if isinstance(conn.post_obj, Neurons):
        # we don't have encoders, and the transform could have large output,
        # so do it on the chip
        host_transform = 1.0
        chip_transform = transform
        dim = conn.size_mid
    else:
        # we have encoders on the chip, so do the transform off-chip
        host_transform = transform
        chip_transform = 1.0
        dim = conn.size_out

    logger.debug("Creating ChipReceiveNode for %s", conn)
    receive = ChipReceiveNode(
        dim * 2,
        size_out=dim,
        label=None if conn.label is None else "%s_node" % conn.label,
        add_to_container=False,
    )
    model.builder.build(model, receive)

    receive2post = Connection(
        receive,
        conn.post,
        transform=chip_transform,
        synapse=model.decode_tau,
        label=None if conn.label is None else "%s_chip" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(model, receive2post, model, conn)
    _inherit_config(model, receive2post, model, conn)
    build_chip_connection(model, receive2post)

    logger.debug("Creating DecodeNeuron ensemble for %s", conn)
    ens = model.node_neurons.get_ensemble(dim, add_to_container=False)
    ens.label = None if conn.label is None else "%s_ens" % conn.label
    _inherit_seed(host, ens, model, conn)
    host.build(ens)
    model.connection_decode_neurons[conn] = ens

    pre2ens = Connection(
        conn.pre,
        ens,
        function=conn.function,
        solver=conn.solver,
        eval_points=conn.eval_points,
        scale_eval_points=conn.scale_eval_points,
        synapse=conn.synapse,
        transform=host_transform,
        label=None if conn.label is None else "%s_enc" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, pre2ens, model, conn)
    host.build(pre2ens)

    logger.debug("Creating HostSendNode for %s", conn)
    send = HostSendNode(
        dim * 2,
        label=None if conn.label is None else "%s_send" % conn.label,
        add_to_container=False,
    )
    host.build(send)

    ensneurons2send = Connection(
        ens.neurons,
        send,
        synapse=None,
        label=None if conn.label is None else "%s_host" % conn.label,
        add_to_container=False,
    )
    _inherit_seed(host, ensneurons2send, model, conn)
    model.host2chip_senders[send] = receive
    host.build(ensneurons2send)
Code example #20
def delay_example():
    seed = 2

    n_neurons = 1000
    theta = 1.0
    sys = PadeDelay(theta, 6)

    T = 20.0
    dt = 0.001
    freq = 1
    rms = 0.4

    tau = 0.1
    #tau_probe = 0.02

    radii = np.ones(len(sys))  # initial guess
    desired_radius = 0.8  # aiming to get this as largest x
    num_iter = 5  # number of times to simulate and retry new radius

    # could also do this simply by the direct method in discrete_example
    # but this is just to demonstrate that you can do something iterative
    # within the same network
    for _ in range(num_iter):
        with Network(seed=seed) as model:
            signal = WhiteSignal(T, high=freq, rms=rms, y0=0)
            u = Node(output=signal)

            delay = LinearNetwork(
                sys, n_neurons_per_ensemble=n_neurons // len(sys), synapse=tau,
                input_synapse=tau, radii=radii, realizer=Balanced(), dt=None)
            Connection(u, delay.input, synapse=None)

            # Since delay.state.input is the PSC x, we can transform
            # that with C to get y (note D=0) without applying any filters
            assert np.allclose(delay.D, 0)
            output = Node(size_in=1)
            Connection(delay.state.input, output, transform=delay.C,
                       synapse=None)
            # Alternative: create an output tau*dy + y such that when
            # filtered we get back y! Note: dy = C(Ax + Bu), since D=0.
            #Connection(delay.state.output, output,
            #           transform=tau*delay.C.dot(delay.A), synapse=tau)
            #Connection(u, output,
            #           transform=tau*delay.C.dot(delay.B), synapse=tau)
            #Connection(delay.output, output, synapse=tau)

            p_u = Probe(u, synapse=None)
            p_x = Probe(delay.state.input, synapse=None)
            p_a = Probe(delay.state.add_neuron_output(), synapse=None)
            p_y = Probe(output, synapse=None)

        with Simulator(model, dt=dt, seed=seed) as sim:
            sim.run(T)

        # place the worst case at x=desired_radius and re-run
        worst_x = np.max(np.abs(sim.data[p_x]), axis=0)
        radii *= (worst_x / desired_radius)
        logging.info("Radii: %s\nWorst x: %s", radii, worst_x)

    return (theta, dt, sim.trange(), sim.data[p_u], sim.data[p_x],
            sim.data[p_a], sim.data[p_y])
Code example #21
def conn_probe(model, probe):
    # Connection probes create a connection from the target, and probe
    # the resulting signal (used when you want to probe the default
    # output of an object, which may not have a predefined signal)

    synapse = 0  # Removed internal filtering

    # get any extra arguments if this probe was created to send data
    #  to an off-chip Node via the splitter

    kwargs = model.chip2host_params.get(probe, None)
    if kwargs is not None:
        # this probe is for sending data to a Node

        # determine the dimensionality
        input_dim = probe.target.size_out
        func = kwargs['function']
        if func is not None:
            if callable(func):
                input_dim = np.asarray(
                    func(np.zeros(input_dim, dtype=np.float64))).size
            else:
                input_dim = len(func[0])
        transform = kwargs['transform']
        transform = np.asarray(transform, dtype=np.float64)
        if transform.ndim <= 1:
            output_dim = input_dim
        elif transform.ndim == 2:
            assert transform.shape[1] == input_dim
            output_dim = transform.shape[0]
        else:
            raise NotImplementedError()

        target = nengo.Node(size_in=output_dim, add_to_container=False)

        conn = Connection(probe.target, target, synapse=synapse,
                          solver=probe.solver, add_to_container=False,
                          **kwargs
                          )
        model.probe_conns[probe] = conn
    else:
        conn = Connection(probe.target, probe, synapse=synapse,
                          solver=probe.solver, add_to_container=False,
                          )
        target = probe

    # Set connection's seed to probe's (which isn't used elsewhere)
    model.seeded[conn] = model.seeded[probe]
    model.seeds[conn] = model.seeds[probe]

    d = conn.size_out
    if isinstance(probe.target, Ensemble):
        # probed values are scaled by the target ensemble's radius
        scale = probe.target.radius
        w = np.diag(scale * np.ones(d))
        weights = np.vstack([w, -w])
    else:
        raise NotImplementedError(
            "Nodes cannot be onchip, connections not yet probeable")

    cx_probe = CxProbe(key='v', weights=weights, synapse=probe.synapse)
    model.objs[target]['in'] = cx_probe
    model.objs[target]['out'] = cx_probe

    # add an extra entry for simulator.run_steps to read data out
    model.objs[probe]['out'] = cx_probe

    # Build the connection
    model.build(conn)