Example #1
def test_train_sparse(Simulator, seed):
    minibatch_size = 4
    n_hidden = 20

    with nengo.Network(seed=seed) as net:
        net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
        net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])
        net.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear()
        net.config[nengo.Connection].synapse = None

        inp = nengo.Node([0, 0, 0, 0, 0])
        ens = nengo.Ensemble(n_hidden, 1)
        out = nengo.Node(size_in=2)
        nengo.Connection(inp[[0, 2, 3]], ens.neurons, transform=dists.Glorot())
        nengo.Connection(ens.neurons, out, transform=dists.Glorot())

        p = nengo.Probe(out)

    with Simulator(net,
                   minibatch_size=minibatch_size,
                   unroll_simulation=1,
                   seed=seed) as sim:
        x = np.asarray([[[0, 0, 0, 0, 0]], [[0, 0, 1, 0, 0]],
                        [[1, 0, 0, 0, 0]], [[1, 0, 1, 0, 0]]])
        y = np.asarray([[[0, 1]], [[1, 0]], [[1, 0]], [[0, 1]]])

        sim.train({inp: x}, {p: y},
                  tf.train.MomentumOptimizer(0.1, 0.9, use_nesterov=True),
                  n_epochs=500)

        sim.step(input_feeds={inp: x})

        assert np.allclose(sim.data[p], y, atol=1e-3)
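# note (added for clarity, not part of the original test): arrays passed to
# sim.train use NengoDL's (minibatch, n_steps, dimensions) layout, so x has
# shape (4, 1, 5) to match the 5-D input node and y has shape (4, 1, 2) to
# match the 2-D probe: one single-step training example per minibatch item.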
Example #2
def test_train_ff(Simulator, neurons, seed):
    minibatch_size = 4
    n_hidden = 20

    with nengo.Network(seed=seed) as net:
        net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
        net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])
        net.config[nengo.Connection].synapse = None

        # note: we use this somewhat convoluted input setup just so that we
        # can test training with two distinct inputs
        inp_a = nengo.Node([0])
        inp_b = nengo.Node([0])
        inp = nengo.Node(size_in=2)
        nengo.Connection(inp_a, inp[0])
        nengo.Connection(inp_b, inp[1])

        ens = nengo.Ensemble(n_hidden + 1,
                             n_hidden,
                             neuron_type=nengo.Sigmoid(tau_ref=1))
        out = nengo.Ensemble(1, 1, neuron_type=nengo.Sigmoid(tau_ref=1))
        nengo.Connection(inp,
                         ens.neurons if neurons else ens,
                         transform=dists.Glorot())
        nengo.Connection(ens.neurons if neurons else ens,
                         out.neurons,
                         transform=dists.Glorot())

        p = nengo.Probe(out.neurons)

    with Simulator(net,
                   minibatch_size=minibatch_size,
                   unroll_simulation=1,
                   seed=seed) as sim:
        x = np.asarray([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
        y = np.asarray([[[0.1]], [[0.9]], [[0.9]], [[0.1]]])

        sim.train({inp_a: x[..., [0]], inp_b: x[..., [1]]}, {p: y},
                  tf.train.AdamOptimizer(0.01),
                  n_epochs=500)

        sim.check_gradients(atol=5e-5)

        sim.step(input_feeds={inp_a: x[..., [0]], inp_b: x[..., [1]]})

        assert np.allclose(sim.data[p], y, atol=1e-3)
Example #3
def test_train_ff(Simulator, neurons, seed):
    minibatch_size = 4
    n_hidden = 20

    np.random.seed(seed)

    with nengo.Network() as net:
        net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
        net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])
        net.config[nengo.Connection].synapse = None

        inp = nengo.Node([0, 0])
        ens = nengo.Ensemble(n_hidden + 1,
                             n_hidden,
                             neuron_type=nengo.Sigmoid(tau_ref=1))
        out = nengo.Ensemble(1, 1, neuron_type=nengo.Sigmoid(tau_ref=1))
        nengo.Connection(inp,
                         ens.neurons if neurons else ens,
                         transform=dists.Glorot())
        nengo.Connection(ens.neurons if neurons else ens,
                         out.neurons,
                         transform=dists.Glorot())

        # TODO: why does training fail if we probe out instead of out.neurons?
        p = nengo.Probe(out.neurons)

    with Simulator(net,
                   minibatch_size=minibatch_size,
                   unroll_simulation=1,
                   seed=seed) as sim:
        x = np.asarray([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])
        y = np.asarray([[[0.1]], [[0.9]], [[0.9]], [[0.1]]])

        sim.train({inp: x}, {p: y},
                  tf.train.MomentumOptimizer(1, 0.9),
                  n_epochs=500)

        sim.check_gradients(atol=5e-5)

        sim.step(input_feeds={inp: x})

        assert np.allclose(sim.data[p], y, atol=1e-3)
Example #4
def test_fit(Simulator, seed):
    minibatch_size = 4
    n_hidden = 20

    with nengo.Network(seed=seed) as net:
        net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
        net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])
        net.config[nengo.Connection].synapse = None

        # note: we use this somewhat convoluted input setup just so that we
        # can test training with two distinct inputs
        inp_a = nengo.Node([0])
        inp_b = nengo.Node([0])
        inp = nengo.Node(size_in=2)
        nengo.Connection(inp_a, inp[0], transform=1)
        nengo.Connection(inp_b, inp[1], transform=1)

        ens = nengo.Ensemble(n_hidden + 1,
                             n_hidden,
                             neuron_type=nengo.Sigmoid(tau_ref=1))
        out = nengo.Ensemble(1, 1, neuron_type=nengo.Sigmoid(tau_ref=1))
        nengo.Connection(inp, ens.neurons, transform=dists.Glorot())
        nengo.Connection(ens.neurons, out.neurons, transform=dists.Glorot())

        nengo.Probe(out.neurons)

    with Simulator(net,
                   minibatch_size=minibatch_size,
                   unroll_simulation=1,
                   seed=seed) as sim:
        x = np.asarray([[[0.0, 0.0]], [[0.0, 1.0]],
                        [[1.0, 0.0]], [[1.0, 1.0]]])
        y = np.asarray([[[0.1]], [[0.9]], [[0.9]], [[0.1]]])

        sim.compile(optimizer=tf.optimizers.Adam(0.01), loss=tf.losses.mse)
        # note: batch_size should be ignored
        with pytest.warns(UserWarning,
                          match="Batch size is determined statically"):
            history = sim.fit(
                [x[..., [0]], x[..., [1]]],
                y,
                validation_data=([x[..., [0]], x[..., [1]]], y),
                epochs=200,
                verbose=0,
                batch_size=-1,
            )
        assert history.history["loss"][-1] < 5e-4
        assert history.history["val_loss"][-1] < 5e-4

        # check that validation_sample_weights work correctly
        history = sim.fit(
            [x[..., [0]], x[..., [1]]],
            y,
            validation_data=([x[..., [0]], x[..., [1]]], y,
                             np.zeros(y.shape[0])),
            epochs=1,
            verbose=0,
        )
        assert np.allclose(history.history["val_loss"][-1], 0)

        # TODO: this will work in eager mode
        # sim.reset()
        # history = sim.fit(
        #     [tf.constant(x[..., [0]]), tf.constant(x[..., [1]])],
        #     tf.constant(y),
        #     epochs=200,
        #     verbose=0,
        # )
        # assert history.history["loss"][-1] < 5e-4

        sim.reset()
        history = sim.fit(
            (((x[..., [0]], x[..., [1]], np.ones((4, 1), dtype=np.int32)), y)
             for _ in range(200)),
            epochs=20,
            steps_per_epoch=10,
            verbose=0,
        )
        assert history.history["loss"][-1] < 5e-4

    # TODO: this crashes if placed on GPU (but not in eager mode)
    with Simulator(
            net,
            minibatch_size=minibatch_size,
            unroll_simulation=1,
            seed=seed,
            device="/cpu:0",
    ) as sim:
        sim.compile(optimizer=tf.optimizers.Adam(0.01), loss=tf.losses.mse)

        history = sim.fit(
            tf.data.Dataset.from_tensors(
                ((x[..., [0]], x[..., [1]],
                  np.ones((4, 1), dtype=np.int32)), y)),
            validation_data=tf.data.Dataset.from_tensors(
                ((x[..., [0]], x[..., [1]],
                  np.ones((4, 1), dtype=np.int32)), y)),
            epochs=200,
            verbose=0,
        )
        assert history.history["loss"][-1] < 5e-4
        assert history.history["val_loss"][-1] < 5e-4
Example #5
def test_glorot(scale, distribution, seed):
    dist = dists.Glorot(scale=scale, distribution=distribution)
    _test_variance_scaling(dist, scale, "fan_avg", seed)
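For reference, here is a minimal sketch (hypothetical, not NengoDL's implementation) of what a Glorot-style "fan_avg" variance-scaling draw with scale=1 and a uniform distribution amounts to:

import numpy as np

def glorot_uniform_sketch(fan_in, fan_out, scale=1.0, rng=np.random):
    # target sample variance is scale / fan_avg, with fan_avg = (fan_in + fan_out) / 2;
    # a uniform distribution on [-limit, limit] has variance limit**2 / 3,
    # so limit = sqrt(3 * scale / fan_avg)
    fan_avg = (fan_in + fan_out) / 2
    limit = np.sqrt(3 * scale / fan_avg)
    # Nengo transforms are (post, pre)-shaped, hence (fan_out, fan_in) here
    return rng.uniform(-limit, limit, size=(fan_out, fan_in))

The "fan_avg" mode is what distinguishes Glorot from He initialization, which scales by fan_in only (with scale=2).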
Example #6
# note: this snippet begins mid-function; the signature below is inferred from
# the parameters it uses (stddev, limit, seed)
def test_truncated_normal(stddev, limit, seed):
    rng = np.random.RandomState(seed)
    dist = dists.TruncatedNormal(mean=0, stddev=stddev, limit=limit)
    if limit is None:
        limit = 2 * stddev
    samples = dist.sample(1000, 2000, rng=rng)
    assert samples.shape == (1000, 2000)
    assert np.allclose(np.mean(samples), 0.0, atol=5e-3)
    assert np.allclose(np.var(samples), tnorm_var(stddev, limit), rtol=5e-3)
    assert np.all(samples < limit)
    assert np.all(samples > -limit)

    # test with default rng
    samples = dist.sample(1000, 2000)
    assert samples.shape == (1000, 2000)
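
# For context only: a hypothetical helper (not the test file's actual tnorm_var)
# showing the variance that the np.var check above is presumably comparing against.
# For a zero-mean normal with standard deviation stddev, truncated symmetrically at
# +/-limit with c = limit / stddev, the variance is
# stddev**2 * (1 - 2 * c * phi(c) / (2 * Phi(c) - 1)), where phi/Phi are the
# standard normal pdf and cdf.
def truncated_normal_variance(stddev, limit):
    from scipy.stats import norm

    c = limit / stddev
    return stddev ** 2 * (1 - 2 * c * norm.pdf(c) / (2 * norm.cdf(c) - 1))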


@pytest.mark.parametrize(
    "dist",
    [
        dists.TruncatedNormal(),
        dists.VarianceScaling(),
        dists.Glorot(),
        dists.He()
    ],
)
def test_seeding(dist, seed):
    assert np.allclose(
        dist.sample(100, rng=np.random.RandomState(seed)),
        dist.sample(100, rng=np.random.RandomState(seed)),
    )