def test_stateful(Simulator, sim_stateful, func_stateful, func):
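    # The network-level ``stateful`` config setting and the per-call
    # ``stateful`` argument interact: simulator state (sim.n_steps) only
    # advances when both are enabled, as the assertions below verify.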
    with Network() as net:
        config.configure_settings(stateful=sim_stateful)

        Ensemble(30, 1)

    with Simulator(net) as sim:
        kwargs = dict(n_steps=5, stateful=func_stateful)

        with pytest.warns(None) as recwarns:
            getattr(sim, func)(**kwargs)
        assert sim.n_steps == (5 if func_stateful and sim_stateful else 0)

        if func == "predict" and func_stateful and not sim_stateful:
            # note: we do not get warnings for predict_on_batch/run_steps because
            # they automatically set func_stateful=sim_stateful
            assert any(
                "Ignoring stateful=True" in str(w.message) for w in recwarns
            )
        else:
            assert len(recwarns) == 0

        getattr(sim, func)(**kwargs)
        assert sim.n_steps == (10 if func_stateful and sim_stateful else 0)
def test_regular_spiking(Simulator, inference_only, seed):
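    # RegularSpiking(RectifiedLinear()) should behave identically to the
    # built-in SpikingRectifiedLinear type, so the two probed spike trains
    # are expected to match exactly (both ensembles share a seed).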
    with nengo.Network() as net:
        config.configure_settings(inference_only=inference_only)

        inp = nengo.Node([1])
        ens0 = nengo.Ensemble(
            100,
            1,
            neuron_type=nengo.SpikingRectifiedLinear(amplitude=2),
            seed=seed)
        ens1 = nengo.Ensemble(
            100,
            1,
            neuron_type=nengo.RegularSpiking(nengo.RectifiedLinear(),
                                             amplitude=2),
            seed=seed,
        )

        nengo.Connection(inp, ens0)
        nengo.Connection(inp, ens1)

        p0 = nengo.Probe(ens0.neurons)
        p1 = nengo.Probe(ens1.neurons)

    with pytest.warns(None) as recwarns:
        with Simulator(net) as sim:
            sim.run_steps(50)

    assert np.allclose(sim.data[p0], sim.data[p1])
    # check that it is actually using the tensorflow implementation
    assert not any("native TensorFlow implementation" in str(w.message)
                   for w in recwarns)
def test_conditional_update(Simulator, use_loop, caplog):
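    # With stateful=False, the builder can skip state-update ops entirely when
    # nothing in the model requires them; adding a learning rule reintroduces
    # exactly one state update, as the log assertions below check.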
    caplog.set_level(logging.INFO)

    with nengo.Network() as net:
        config.configure_settings(stateful=False, use_loop=use_loop)

        a = nengo.Ensemble(10, 1)
        b = nengo.Node(size_in=1)
        conn = nengo.Connection(a, b)

    with Simulator(net):
        pass

    assert "Number of state updates: 0" in caplog.text
    caplog.clear()

    conn.learning_rule_type = nengo.PES()

    with Simulator(net):
        pass

    assert "Number of state updates: 1" in caplog.text
    caplog.clear()

    with net:
        config.configure_settings(trainable=True)

    with Simulator(net):
        pass

    assert "Number of state updates: 1" in caplog.text
def test_densenet(Simulator, seed):
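    # Sanity check for the Keras converter on a large model: after conversion,
    # the simulator's Keras model should expose the same number of trainable
    # parameters as the original. Batch normalization layers are skipped in
    # the count, presumably because the converter handles them specially
    # under inference_only=True.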
    tf.random.set_seed(seed)
    model = tf.keras.applications.densenet.DenseNet121(
        weights=None, include_top=False, input_shape=(112, 112, 3)
    )

    conv = converter.Converter(model,
                               allow_fallback=False,
                               max_to_avg_pool=True,
                               inference_only=True)

    keras_params = 0
    for layer in model.layers:
        if not isinstance(layer, BatchNormalization):
            for w in layer._trainable_weights:
                keras_params += np.prod(w.shape)

    # note: we don't expect any of the verification checks to pass, due to the
    # max_to_avg_pool swap, so just checking that the network structure has been
    # recreated
    with conv.net:
        # undo the inference_only=True so that parameters will be marked as
        # trainable (so that the check below will work)
        config.configure_settings(inference_only=False)

    with Simulator(conv.net) as sim:
        assert keras_params == sum(
            np.prod(w.shape) for w in sim.keras_model.trainable_weights)
def test_learning_phase_warning(Simulator):
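    # Keras's learning_phase_scope(1) forces training-mode behaviour, which is
    # incompatible with a network built with inference_only=True, so the build
    # should fail with a BuildError.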
    with nengo.Network() as net:
        config.configure_settings(inference_only=True)
    with pytest.raises(BuildError, match="inference_only=True"):
        with tf.keras.backend.learning_phase_scope(1):
            with Simulator(net):
                pass
def test_soft_lif(Simulator, sigma, seed):
    with nengo.Network(seed=seed) as net:
        inp = nengo.Node([0.5])
        ens = nengo.Ensemble(10, 1, neuron_type=SoftLIFRate(sigma=sigma),
                             intercepts=nengo.dists.Uniform(-1, 0),
                             encoders=nengo.dists.Choice([[1]]))
        nengo.Connection(inp, ens)
        p = nengo.Probe(ens.neurons)

    x = str(ens.neuron_type)
    if sigma == 1:
        assert "sigma" not in x
    else:
        assert "sigma=%s" % sigma in x

    with nengo.Simulator(net) as sim:
        _, nengo_curves = nengo.utils.ensemble.tuning_curves(ens, sim)
        sim.run_steps(30)

    with net:
        config.configure_settings(dtype=tf.float64)

    with Simulator(net) as sim2:
        _, nengo_dl_curves = nengo.utils.ensemble.tuning_curves(ens, sim2)
        sim2.run_steps(30)

    assert np.allclose(nengo_curves, nengo_dl_curves)
    assert np.allclose(sim.data[p], sim2.data[p])
def test_soft_lif(Simulator, sigma, seed):
    with nengo.Network(seed=seed) as net:
        inp = nengo.Node([0.5])
        ens = nengo.Ensemble(
            10,
            1,
            neuron_type=SoftLIFRate(sigma=sigma),
            intercepts=nengo.dists.Uniform(-1, 0),
            encoders=nengo.dists.Choice([[1]]),
        )
        nengo.Connection(inp, ens)
        p = nengo.Probe(ens.neurons)

    x = str(ens.neuron_type)
    if sigma == 1:
        assert "sigma" not in x
    else:
        assert "sigma=%s" % sigma in x

    with nengo.Simulator(net) as sim:
        _, nengo_curves = nengo.utils.ensemble.tuning_curves(ens, sim)
        sim.run_steps(30)

    with net:
        config.configure_settings(dtype="float64")

    with Simulator(net) as sim2:
        _, nengo_dl_curves = nengo.utils.ensemble.tuning_curves(ens, sim2)
        sim2.run_steps(30)

    assert np.allclose(nengo_curves, nengo_dl_curves)
    assert np.allclose(sim.data[p], sim2.data[p])
def test_get_tensor(Simulator, use_loop):
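    # get_tensor maps a built Nengo Signal (here the connection weights) to
    # the underlying TensorFlow tensor, so its value can be read directly via
    # the Keras backend.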
    with nengo.Network() as net:
        config.configure_settings(use_loop=use_loop)

        a = nengo.Node([1])
        b = nengo.Ensemble(10, 1)
        c = nengo.Connection(a,
                             b.neurons,
                             transform=np.arange(10)[:, None],
                             synapse=None)
        p = nengo.Probe(c)

        # build a signal probe so that the indices get loaded into the sim
        # (checks that index reloading works properly)
        nengo.Probe(c, "weights")

    kwargs = dict() if use_loop else dict(unroll_simulation=10)
    with Simulator(net, **kwargs) as sim:
        tensor = sim.tensor_graph.get_tensor(sim.model.sig[c]["weights"])

        assert np.allclose(tf.keras.backend.get_value(tensor),
                           np.arange(10)[:, None])

        sim.run_steps(10)
        assert np.allclose(sim.data[p], np.arange(10)[None, :])
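    # Wrapper presumably used as the test suite's ``Simulator`` fixture: it
    # applies the command-line defaults (unroll, device, inference_only,
    # dtype) unless the network already configures those settings itself.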
    def TestSimulator(net, *args, **kwargs):
        kwargs.setdefault("unroll_simulation", unroll)
        kwargs.setdefault("device", device)

        if net is not None and config.get_setting(net, "inference_only") is None:
            with net:
                config.configure_settings(inference_only=inference_only)

        if net is not None and config.get_setting(net, "dtype") is None:
            with net:
                config.configure_settings(dtype=dtype)

        return simulator.Simulator(net, *args, **kwargs)
def test_spiking_swap(Simulator, rate, spiking, seed):
    grads = []
    for neuron_type in [rate, spiking]:
        with nengo.Network(seed=seed) as net:
            config.configure_settings(dtype="float64")

            if rate == SoftLIFRate and neuron_type == spiking:
                config.configure_settings(lif_smoothing=1.0)

            a = nengo.Node(output=[1])
            b = nengo.Ensemble(50, 1, neuron_type=neuron_type())
            c = nengo.Ensemble(50, 1, neuron_type=neuron_type(amplitude=0.1))
            nengo.Connection(a, b, synapse=None)

            # note: we avoid decoders, as the rate/spiking models may have
            # different rate implementations in nengo, resulting in different
            # decoders
            nengo.Connection(b.neurons, c.neurons, synapse=None, transform=dists.He())
            p = nengo.Probe(c.neurons)

        with Simulator(net) as sim:
            if not sim.tensor_graph.inference_only:
                # TODO: this works in eager mode
                # with tf.GradientTape() as tape:
                #     tape.watch(sim.tensor_graph.trainable_variables)
                #     inputs = [
                #         tf.zeros((1, sim.unroll * 2, 1)),
                #         tf.constant([[sim.unroll * 2]]),
                #     ]
                #     outputs = sim.tensor_graph(inputs, training=True)
                # g = tape.gradient(outputs, sim.tensor_graph.trainable_variables)

                # note: not actually checking gradients, just using this to get the
                # gradients
                # TODO: why does the gradient check fail?
                g = sim.check_gradients(atol=1e10)[p]["analytic"]

                grads.append(g)

            sim.run(0.5)

        # check that the normal output is unaffected by the swap logic
        with nengo.Simulator(net) as sim2:
            sim2.run(0.5)

            assert np.allclose(sim.data[p], sim2.data[p])

    # check that the gradients match
    assert all(np.allclose(g0, g1) for g0, g1 in zip(*grads))
def test_spiking_swap(Simulator, rate, spiking, seed):
    grads = []
    for neuron_type in [rate, spiking]:
        with nengo.Network(seed=seed) as net:
            config.configure_settings(dtype="float64")

            if rate == SoftLIFRate and neuron_type == spiking:
                config.configure_settings(lif_smoothing=1.0)

            a = nengo.Node(output=[1])
            b = nengo.Ensemble(50, 1, neuron_type=neuron_type())
            c = nengo.Ensemble(50, 1, neuron_type=neuron_type(amplitude=0.1))
            nengo.Connection(a, b, synapse=None)

            # note: we avoid decoders, as the rate/spiking models may have
            # different rate implementations in nengo, resulting in different
            # decoders
            nengo.Connection(b.neurons,
                             c.neurons,
                             synapse=None,
                             transform=dists.He())
            p = nengo.Probe(c.neurons)

        with Simulator(net) as sim:
            if not sim.tensor_graph.inference_only:
                with tf.GradientTape() as tape:
                    tape.watch(sim.tensor_graph.trainable_variables)
                    inputs = [
                        tf.zeros((1, sim.unroll * 2, 1)),
                        tf.constant([[sim.unroll * 2]]),
                    ]
                    outputs = sim.tensor_graph(inputs, training=True)
                g = tape.gradient(outputs,
                                  sim.tensor_graph.trainable_variables)

                grads.append(g)

            sim.run(0.5)

        # check that the normal output is unaffected by the swap logic
        with nengo.Simulator(net) as sim2:
            sim2.run(0.5)

            if not isinstance(neuron_type(), compat.PoissonSpiking):
                # we don't expect these to match for PoissonSpiking, since the
                # rng implementations differ between numpy and tensorflow
                assert np.allclose(sim.data[p], sim2.data[p])

    # check that the gradients match
    assert all(np.allclose(g0, g1) for g0, g1 in zip(*grads))
def test_keep_history(Simulator, seed):
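    # With keep_history=False on a probe, only the final timestep is stored;
    # the shape assertions below confirm (10, 30) vs (1, 30), and the stored
    # value matches the last row of the full history.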
    with Network(seed=seed) as net:
        config.configure_settings(keep_history=True)
        a = Ensemble(30, 1)
        p = Probe(a.neurons, synapse=0.1)

    with Simulator(net) as sim:
        sim.run_steps(10)

    with net:
        net.config[p].keep_history = False

    with Simulator(net) as sim2:
        sim2.run_steps(10)

    assert sim.data[p].shape == (10, 30)
    assert sim2.data[p].shape == (1, 30)
    assert np.allclose(sim.data[p][[-1]], sim2.data[p])
def test_session_config(Simulator, as_model):
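    # session_config (TF1-style) takes dotted ConfigProto attribute paths and
    # applies the given values to the underlying session's config.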
    with Network() as net:
        config.configure_settings(session_config={
            "graph_options.optimizer_options.opt_level": 21,
            "gpu_options.allow_growth": True})

    if as_model:
        # checking that config settings work when we pass in a model instead of
        # network
        model = Model(dt=0.001, builder=builder.NengoBuilder())
        model.build(net)
        net = None
    else:
        model = None

    with Simulator(net, model=model) as sim:
        assert sim.sess._config.graph_options.optimizer_options.opt_level == 21
        assert sim.sess._config.gpu_options.allow_growth
def test_simulator_fixture(Simulator, pytestconfig):
    with Simulator(nengo.Network()) as sim:
        assert sim.tensor_graph.dtype == (
            tf.float32 if pytestconfig.getoption("--dtype") == "float32" else
            tf.float64)
        assert sim.unroll == pytestconfig.getoption("--unroll-simulation")
        assert sim.tensor_graph.device == pytestconfig.getoption("--device")
        assert (config.get_setting(sim.model, "inference_only") ==
                pytestconfig.getoption("--inference-only"))

    # check that manually specified values aren't overridden
    with nengo.Network() as net:
        config.configure_settings(dtype=tf.float64, inference_only=True)

    with Simulator(net, unroll_simulation=5, device="/cpu:0") as sim:
        assert sim.tensor_graph.dtype == tf.float64
        assert sim.unroll == 5
        assert sim.tensor_graph.device == "/cpu:0"
        assert config.get_setting(sim.model, "inference_only")
def test_keep_history(Simulator, use_loop, seed):
    with Network(seed=seed) as net:
        config.configure_settings(keep_history=True, use_loop=use_loop)
        a = Ensemble(30, 1)
        p = Probe(a.neurons, synapse=0.1)

    kwargs = dict() if use_loop else dict(unroll_simulation=10)
    with Simulator(net, **kwargs) as sim:
        sim.run_steps(10)

    with net:
        net.config[p].keep_history = False

    with Simulator(net, **kwargs) as sim2:
        sim2.run_steps(10)

    assert sim.data[p].shape == (10, 30)
    assert sim2.data[p].shape == (1, 30)
    assert np.allclose(sim.data[p][[-1]], sim2.data[p])
def test_random_spiking(Simulator, inference_only, seed):
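    # StochasticSpiking and PoissonSpiking wrap a rate neuron type; over a
    # long run (10000 steps) the time-averaged spike rates should approximate
    # the underlying Tanh rate output, which is what the tolerance checks
    # below verify.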
    with nengo.Network() as net:
        config.configure_settings(inference_only=inference_only)

        inp = nengo.Node([1])
        ens0 = nengo.Ensemble(100, 1, neuron_type=nengo.Tanh(), seed=seed)
        ens1 = nengo.Ensemble(
            100,
            1,
            neuron_type=nengo.StochasticSpiking(nengo.Tanh()),
            seed=seed,
        )
        ens2 = nengo.Ensemble(
            100,
            1,
            neuron_type=nengo.PoissonSpiking(nengo.Tanh()),
            seed=seed,
        )

        nengo.Connection(inp, ens0, synapse=None)
        nengo.Connection(inp, ens1, synapse=None)
        nengo.Connection(inp, ens2, synapse=None)

        p0 = nengo.Probe(ens0.neurons)
        p1 = nengo.Probe(ens1.neurons)
        p2 = nengo.Probe(ens2.neurons)

    with pytest.warns(None) as recwarns:
        with Simulator(net, seed=seed) as sim:
            sim.run_steps(10000)

    assert not any("native TensorFlow implementation" in str(w.message)
                   for w in recwarns)

    assert np.allclose(sim.data[p0][0],
                       np.mean(sim.data[p1], axis=0),
                       atol=1,
                       rtol=2e-1)
    assert np.allclose(sim.data[p0],
                       np.mean(sim.data[p2], axis=0),
                       atol=1,
                       rtol=1e-1)
def test_neuron_gradients(Simulator, neuron_type, seed, rng):
    # avoid intercepts around zero, which can cause errors in the
    # finite differencing in check_gradients
    intercepts = np.concatenate(
        (rng.uniform(-0.5, -0.2, size=25), rng.uniform(0.2, 0.5, size=25))
    )

    kwargs = {"sigma": 0.1} if neuron_type == SoftLIFRate else {}

    with nengo.Network(seed=seed) as net:
        config.configure_settings(dtype="float64")
        net.config[nengo.Ensemble].intercepts = intercepts
        a = nengo.Node(output=[0, 0])
        b = nengo.Ensemble(50, 2, neuron_type=neuron_type(**kwargs))
        c = nengo.Ensemble(50, 2, neuron_type=neuron_type(amplitude=0.1, **kwargs))
        nengo.Connection(a, b, synapse=None)
        nengo.Connection(b, c, synapse=None)
        nengo.Probe(c)

    with Simulator(net, seed=seed) as sim:
        sim.check_gradients()
def test_configure_trainable():
    with Network() as net:
        conf = net.config
        config.configure_settings(trainable=None)

    assert conf[Ensemble].trainable is None
    assert conf[Connection].trainable is None
    assert conf[ensemble.Neurons].trainable is None

    # check that we can set trainable after it is set up for configuration
    conf[Ensemble].trainable = True

    # check that boolean value is enforced
    with pytest.raises(ValidationError):
        conf[Ensemble].trainable = 5

    assert conf[Ensemble].trainable is True

    # check that calling configure again overrides previous changes
    with net:
        config.configure_settings(trainable=None)

    assert conf[Ensemble].trainable is None

    # check that non-None defaults work
    with net:
        config.configure_settings(trainable=False)

    assert conf[Ensemble].trainable is False

    # check that calling configure outside network context is an error
    with pytest.raises(NetworkContextError):
        config.configure_settings(trainable=None)

    # check that passing an invalid parameter raises an error
    with net:
        with pytest.raises(ConfigError):
            config.configure_settings(troinable=None)
def test_spiking_swap(Simulator, rate, spiking, seed):
    grads = []
    for neuron_type in [rate, spiking]:
        with nengo.Network(seed=seed) as net:
            config.configure_settings(dtype=tf.float64)

            if rate == SoftLIFRate and neuron_type == spiking:
                config.configure_settings(lif_smoothing=1.0)

            a = nengo.Node(output=[1])
            b = nengo.Ensemble(50, 1, neuron_type=neuron_type())
            c = nengo.Ensemble(50, 1, neuron_type=neuron_type(amplitude=0.1))
            nengo.Connection(a, b, synapse=None)

            # note: we avoid decoders, as the rate/spiking models may have
            # different rate implementations in nengo, resulting in different
            # decoders
            nengo.Connection(b.neurons, c.neurons, synapse=None,
                             transform=dists.He())
            p = nengo.Probe(c.neurons)

        with Simulator(net) as sim:
            grads.append(sim.sess.run(
                tf.gradients(sim.tensor_graph.probe_arrays[p],
                             tf.trainable_variables()),
                feed_dict=sim._fill_feed(10, training=True)))

            sim.soft_reset()
            sim.run(0.5)

        # check that the normal output is unaffected by the swap logic
        with nengo.Simulator(net) as sim2:
            sim2.run(0.5)

            assert np.allclose(sim.data[p], sim2.data[p])

    # check that the gradients match
    assert all(np.allclose(g0, g1) for g0, g1 in zip(*grads))
def test_learning_phase(Simulator):
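    # With gain=0 and bias=1, the rate implementation outputs a constant 1.0,
    # while the spiking implementation (firing at ~1 Hz) almost surely emits
    # no spikes in a 10-step run; learning_phase=True forces the rate
    # (training) behaviour even during inference runs.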
    with nengo.Network() as net:
        inp = nengo.Node([0])
        ens = nengo.Ensemble(1,
                             1,
                             gain=[0],
                             bias=[1],
                             neuron_type=nengo.SpikingRectifiedLinear())
        nengo.Connection(inp, ens, synapse=None)
        p = nengo.Probe(ens.neurons)

    with net:
        config.configure_settings(learning_phase=True)

    with Simulator(net) as sim:
        sim.run_steps(10)
        assert np.allclose(sim.data[p], 1)

    with net:
        config.configure_settings(learning_phase=None)

    with Simulator(net) as sim:
        sim.run_steps(10)
        assert np.allclose(sim.data[p], 0)
    def __call__(
        self,
        input,
        transform=default_transform,
        shape_in=None,
        synapse=None,
        return_conn=False,
        **layer_args
    ):
        """
        Apply the TensorNode layer to the given input object.

        Parameters
        ----------
        input : ``NengoObject``
            Object providing input to the layer.
        transform : `~numpy.ndarray`
            Transform matrix to apply on connection from ``input`` to this layer.
        shape_in : tuple of int
            If not None, reshape the input to the given shape.
        synapse : float or `~nengo.synapses.Synapse`
            Synapse to apply on connection from ``input`` to this layer.
        return_conn : bool
            If True, also return the connection linking this layer to ``input``.
        layer_args : dict
            These arguments will be passed to `.TensorNode` if ``layer_func`` is a
            callable or Keras Layer, or `~nengo.Ensemble` if ``layer_func`` is a
            `~nengo.neurons.NeuronType`.

        Returns
        -------
        obj : `.TensorNode` or `~nengo.ensemble.Neurons`
            A TensorNode that implements the given layer function (if
            ``layer_func`` was a callable/Keras layer), or a Neuron object with the
            given neuron type, connected to ``input``.
        conn : `~nengo.Connection`
            If ``return_conn`` is True, also returns the connection object linking
            ``input`` and ``obj``.

        Notes
        -----
        The input connection created for the new TensorNode will be marked as
        non-trainable by default.
        """

        if shape_in is not None and all(x is not None for x in shape_in):
            size_in = np.prod(shape_in)
        elif isinstance(transform, np.ndarray) and transform.ndim == 2:
            size_in = transform.shape[0]
        else:
            size_in = input.size_out

        if isinstance(self.layer_func, NeuronType):
            obj = Ensemble(
                size_in, 1, neuron_type=self.layer_func, **layer_args
            ).neurons
        else:
            obj = TensorNode(
                self.layer_func,
                shape_in=(size_in,) if shape_in is None else shape_in,
                pass_time=False,
                **layer_args,
            )

        conn = Connection(input, obj, synapse=synapse, transform=transform)

        # set connection to non-trainable
        cfg = Config.context[0][conn]
        if not hasattr(cfg, "trainable"):
            configure_settings(trainable=None)
        cfg.trainable = False

        return (obj, conn) if return_conn else obj
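# A minimal usage sketch for the __call__ method above (assuming it belongs
# to nengo_dl.Layer; the object names here are illustrative):
#
#     with nengo.Network():
#         inp = nengo.Node(np.zeros(784))
#         hidden = Layer(tf.keras.layers.Dense(128))(inp)
#         out = Layer(nengo.RectifiedLinear())(hidden)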
def test_mark_signals_config():
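    # mark_signals walks the built model and tags each Signal's ``trainable``
    # attribute based on the (possibly nested) network config, which the
    # assertions below verify for ensembles, neurons, and subnetworks.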
    with nengo.Network() as net:
        config.configure_settings(trainable=None)
        net.config[nengo.Ensemble].trainable = False

        with nengo.Network():
            # check that object in subnetwork inherits config from parent
            ens0 = nengo.Ensemble(10, 1, label="ens0")

            # check that ens.neurons can be set independent of ens
            net.config[ens0.neurons].trainable = True

            with nengo.Network():
                with nengo.Network() as subnet:
                    # check that subnetworks can override parent configs
                    # net.config[nengo.Ensemble].trainable = True
                    net.config[subnet].trainable = True
                    ens1 = nengo.Ensemble(10, 1, label="ens1")

                    with nengo.Network():
                        # check that subnetworks inherit the trainable settings
                        # from parent networks
                        ens3 = nengo.Ensemble(10, 1, label="ens3")

            # check that instances can be set independent of class
            ens2 = nengo.Ensemble(10, 1, label="ens2")
            net.config[ens2].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    progress = utils.NullProgressBar()

    tg = tensor_graph.TensorGraph(model, None, None, tf.float32, 1, None,
                                  progress)
    tg.mark_signals()

    assert not model.sig[ens0]["encoders"].trainable
    assert model.sig[ens0.neurons]["bias"].trainable

    assert model.sig[ens1]["encoders"].trainable

    assert model.sig[ens2]["encoders"].trainable

    assert model.sig[ens3]["encoders"].trainable

    # check that learning rule connections can be manually set to True
    with nengo.Network() as net:
        config.configure_settings(trainable=None)

        a = nengo.Ensemble(10, 1)
        b = nengo.Ensemble(10, 1)
        conn0 = nengo.Connection(a, b, learning_rule_type=nengo.PES())
        net.config[conn0].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, tf.float32, 1, None,
                                  progress)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert model.sig[conn0]["weights"].trainable

    with nengo.Network() as net:
        config.configure_settings(trainable=None)

        a = nengo.Node([0])
        ens = nengo.Ensemble(10, 1)
        nengo.Connection(a, ens, learning_rule_type=nengo.Voja())
        net.config[nengo.Ensemble].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, tf.float32, 1, None,
                                  progress)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert model.sig[ens]["encoders"].trainable

    # check that models with no toplevel work
    sig = nengo.builder.signal.Signal([0])
    op = nengo.builder.operator.Reset(sig, 1)
    model = nengo.builder.Model()
    model.add_op(op)

    tg = tensor_graph.TensorGraph(model, None, None, tf.float32, 1, None,
                                  progress)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert not sig.trainable
Exemple #26
0
def test_remove_reset_inc_functional(Simulator, seed):
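    # Graph simplification passes operate on the operator plan; with only
    # remove_zero_incs/remove_unmodified_resets the Reset ops remain, while
    # adding remove_reset_incs merges each Reset+inc pair into a single set,
    # without changing the simulation output.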
    with nengo.Network(seed=seed) as net:
        config.configure_settings(
            simplifications=[remove_zero_incs, remove_unmodified_resets])

        # reset+simprocess on the noise
        ens = nengo.Ensemble(1,
                             1,
                             noise=nengo.processes.WhiteNoise(),
                             neuron_type=nengo.Direct())

        node0 = nengo.Node(size_in=1, label="node0")
        # reset+elementwiseinc (weights)
        # reset+copy (to node input)
        nengo.Connection(ens, node0, transform=1, synapse=None)

        node1 = nengo.Node(size_in=3, label="node1")
        # reset+dotinc (weights)
        # reset+copy (to node input)
        nengo.Connection(node0, node1, transform=np.ones((3, 1)), synapse=None)

        # reset+elementwiseinc (weights, in nengo<3.1)
        # reset+copy (to probe input)
        p = nengo.Probe(node1)

    with Simulator(net) as sim:
        extra_op = LooseVersion(nengo.__version__) < "3.1.0"

        assert len(sim.tensor_graph.plan) == 8 + extra_op

        # check that we have all the resets we expect
        resets = sim.tensor_graph.plan[1]
        assert isinstance(resets[0], Reset)
        assert len(resets) == 6 + extra_op

        # check that all the ops are incs like we expect
        incs = sim.tensor_graph.plan[2:]
        for ops in incs:
            for op in ops:
                assert len(op.incs) == 1
                assert len(op.sets) == 0

        sim.run_steps(100)

    with net:
        config.configure_settings(simplifications=[
            remove_zero_incs,
            remove_unmodified_resets,
            remove_reset_incs,
        ])

    with Simulator(net) as sim_remove:
        # check that resets have been removed
        assert len(sim_remove.tensor_graph.plan) == 7 + extra_op
        assert not any(
            isinstance(x[0], Reset) for x in sim_remove.tensor_graph.plan
        )

        # check that all the ops are sets like we expect
        incs = sim_remove.tensor_graph.plan[1:]
        for ops in incs:
            for op in ops:
                assert len(op.incs) == 0
                assert len(op.sets) == 1

        sim_remove.run_steps(100)

    assert np.allclose(sim.data[p], sim_remove.data[p])
def test_mark_signals_config():
    with nengo.Network() as net:
        config.configure_settings(trainable=None)
        net.config[nengo.Ensemble].trainable = False

        with nengo.Network():
            # check that object in subnetwork inherits config from parent
            ens0 = nengo.Ensemble(10, 1, label="ens0")

            # check that ens.neurons can be set independent of ens
            net.config[ens0.neurons].trainable = True

            with nengo.Network():
                with nengo.Network():
                    # check that subnetworks can override parent configs
                    config.configure_settings(trainable=True)
                    ens1 = nengo.Ensemble(10, 1, label="ens1")

                    with nengo.Network():
                        # check that subnetworks inherit the trainable settings
                        # from parent networks
                        ens3 = nengo.Ensemble(10, 1, label="ens3")

            # check that instances can be set independent of class
            ens2 = nengo.Ensemble(10, 1, label="ens2")
            net.config[ens2].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    progress = utils.NullProgressBar()

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    tg.mark_signals()

    assert not model.sig[ens0]["encoders"].trainable
    assert model.sig[ens0.neurons]["bias"].trainable

    assert model.sig[ens1]["encoders"].trainable

    assert model.sig[ens2]["encoders"].trainable

    assert model.sig[ens3]["encoders"].trainable

    # check that learning rule connections can be manually set to True
    with nengo.Network() as net:
        config.configure_settings(trainable=None)

        a = nengo.Ensemble(10, 1)
        b = nengo.Ensemble(10, 1)
        conn0 = nengo.Connection(a, b, learning_rule_type=nengo.PES())
        net.config[conn0].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert model.sig[conn0]["weights"].trainable

    with nengo.Network() as net:
        config.configure_settings(trainable=None)

        a = nengo.Node([0])
        ens = nengo.Ensemble(10, 1)
        nengo.Connection(a, ens, learning_rule_type=nengo.Voja())
        net.config[nengo.Ensemble].trainable = True

    model = nengo.builder.Model()
    model.build(net)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert model.sig[ens]["encoders"].trainable

    # check that models with no toplevel work
    sig = nengo.builder.signal.Signal([0])
    op = nengo.builder.operator.Reset(sig, 1)
    model = nengo.builder.Model()
    model.add_op(op)

    tg = tensor_graph.TensorGraph(model, None, None, 1, None, progress, None)
    with pytest.warns(UserWarning):
        tg.mark_signals()

    assert not sig.trainable