Example #1
# imports assumed by the test excerpts below (nengo-dl test-suite context);
# Simulator, rate, spiking, and seed are pytest fixtures/parameters supplied
# by the test harness
import nengo
import numpy as np
import pytest
import tensorflow as tf

from nengo_dl import SoftLIFRate, compat, config, dists


def test_spiking_swap(Simulator, rate, spiking, seed):
    grads = []
    for neuron_type in [rate, spiking]:
        with nengo.Network(seed=seed) as net:
            config.configure_settings(dtype="float64")

            if rate == SoftLIFRate and neuron_type == spiking:
                config.configure_settings(lif_smoothing=1.0)

            a = nengo.Node(output=[1])
            b = nengo.Ensemble(50, 1, neuron_type=neuron_type())
            c = nengo.Ensemble(50, 1, neuron_type=neuron_type(amplitude=0.1))
            nengo.Connection(a, b, synapse=None)

            # note: we avoid decoders, as the rate/spiking models may have
            # different rate implementations in nengo, resulting in different
            # decoders
            nengo.Connection(b.neurons, c.neurons, synapse=None, transform=dists.He())
            p = nengo.Probe(c.neurons)

        with Simulator(net) as sim:
            if not sim.tensor_graph.inference_only:
                # TODO: this works in eager mode
                # with tf.GradientTape() as tape:
                #     tape.watch(sim.tensor_graph.trainable_variables)
                #     inputs = [
                #         tf.zeros((1, sim.unroll * 2, 1)),
                #         tf.constant([[sim.unroll * 2]]),
                #     ]
                #     outputs = sim.tensor_graph(inputs, training=True)
                # g = tape.gradient(outputs, sim.tensor_graph.trainable_variables)

                # note: we are not actually checking the gradients here (hence
                # the huge atol); check_gradients is just a convenient way to
                # compute them
                # TODO: why does the gradient check fail?
                g = sim.check_gradients(atol=1e10)[p]["analytic"]

                grads.append(g)

            sim.run(0.5)

        # check that the normal output is unaffected by the swap logic
        with nengo.Simulator(net) as sim2:
            sim2.run(0.5)

            assert np.allclose(sim.data[p], sim2.data[p])

    # check that the gradients match
    assert all(np.allclose(g0, g1) for g0, g1 in zip(*grads))
Example #2
def test_spiking_swap(Simulator, rate, spiking, seed):
    grads = []
    for neuron_type in [rate, spiking]:
        with nengo.Network(seed=seed) as net:
            config.configure_settings(dtype="float64")

            if rate == SoftLIFRate and neuron_type == spiking:
                config.configure_settings(lif_smoothing=1.0)

            a = nengo.Node(output=[1])
            b = nengo.Ensemble(50, 1, neuron_type=neuron_type())
            c = nengo.Ensemble(50, 1, neuron_type=neuron_type(amplitude=0.1))
            nengo.Connection(a, b, synapse=None)

            # note: we avoid decoders, as the rate/spiking models may have
            # different rate implementations in nengo, resulting in different
            # decoders
            nengo.Connection(b.neurons,
                             c.neurons,
                             synapse=None,
                             transform=dists.He())
            p = nengo.Probe(c.neurons)

        with Simulator(net) as sim:
            if not sim.tensor_graph.inference_only:
                with tf.GradientTape() as tape:
                    tape.watch(sim.tensor_graph.trainable_variables)
                    inputs = [
                        tf.zeros((1, sim.unroll * 2, 1)),
                        tf.constant([[sim.unroll * 2]]),
                    ]
                    outputs = sim.tensor_graph(inputs, training=True)
                g = tape.gradient(outputs,
                                  sim.tensor_graph.trainable_variables)

                grads.append(g)

            sim.run(0.5)

        # check that the normal output is unaffected by the swap logic
        with nengo.Simulator(net) as sim2:
            sim2.run(0.5)

            if not isinstance(neuron_type(), compat.PoissonSpiking):
                # we don't expect these to match for PoissonSpiking, since the
                # numpy and TensorFlow RNG implementations differ
                assert np.allclose(sim.data[p], sim2.data[p])

    # check that the gradients match
    assert all(np.allclose(g0, g1) for g0, g1 in zip(*grads))
Example #3
def test_spiking_swap(Simulator, rate, spiking, seed):
    grads = []
    for neuron_type in [rate, spiking]:
        with nengo.Network(seed=seed) as net:
            config.configure_settings(dtype=tf.float64)

            if rate == SoftLIFRate and neuron_type == spiking:
                config.configure_settings(lif_smoothing=1.0)

            a = nengo.Node(output=[1])
            b = nengo.Ensemble(50, 1, neuron_type=neuron_type())
            c = nengo.Ensemble(50, 1, neuron_type=neuron_type(amplitude=0.1))
            nengo.Connection(a, b, synapse=None)

            # note: we avoid decoders, as the rate/spiking models may have
            # different rate implementations in nengo, resulting in different
            # decoders
            nengo.Connection(b.neurons, c.neurons, synapse=None,
                             transform=dists.He())
            p = nengo.Probe(c.neurons)

        with Simulator(net) as sim:
            grads.append(sim.sess.run(
                tf.gradients(sim.tensor_graph.probe_arrays[p],
                             tf.trainable_variables()),
                feed_dict=sim._fill_feed(10, training=True)))

            sim.soft_reset()
            sim.run(0.5)

        # check that the normal output is unaffected by the swap logic
        with nengo.Simulator(net) as sim2:
            sim2.run(0.5)

            assert np.allclose(sim.data[p], sim2.data[p])

    # check that the gradients match
    assert all(np.allclose(g0, g1) for g0, g1 in zip(*grads))
Example #4
def test_he(scale, distribution, seed):
    dist = dists.He(scale=scale, distribution=distribution)
    _test_variance_scaling(dist, scale**2, "fan_in", seed)
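
Note: _test_variance_scaling is a shared helper defined elsewhere in the test
module; for He it checks that samples are zero-mean with variance proportional
to scale**2 / fan_in. A self-contained sketch of that scaling property (the
shapes, seed, and tolerance here are illustrative assumptions, not nengo-dl's
actual helper):

import numpy as np
from nengo_dl import dists

# He-initialized weights have variance inversely proportional to the fan,
# so growing both sample dimensions tenfold should shrink the sample
# variance by roughly tenfold (any constant factor cancels in the ratio)
v_small = np.var(dists.He().sample(100, 100, rng=np.random.RandomState(0)))
v_large = np.var(dists.He().sample(1000, 1000, rng=np.random.RandomState(0)))
assert np.allclose(v_small / v_large, 10.0, rtol=0.2)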
Example #5
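Note: tnorm_var, used below, is a helper defined elsewhere in the test file. A
plausible stand-in (an assumption, not nengo-dl's code) computes the
closed-form variance of a zero-mean normal truncated symmetrically at ±limit:

from scipy.stats import norm

def tnorm_var(stddev, limit):
    # For X ~ N(0, stddev**2) truncated to (-limit, limit), with
    # a = limit / stddev and Z = 2*cdf(a) - 1 (the retained mass),
    # symmetry keeps the mean at 0 and the variance is
    # stddev**2 * (1 - 2*a*pdf(a) / Z)
    a = limit / stddev
    z = 2 * norm.cdf(a) - 1
    return stddev ** 2 * (1 - 2 * a * norm.pdf(a) / z)
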
def test_truncated_normal(stddev, limit, seed):  # enclosing signature restored; name assumed
    rng = np.random.RandomState(seed)
    dist = dists.TruncatedNormal(mean=0, stddev=stddev, limit=limit)
    if limit is None:
        limit = 2 * stddev
    samples = dist.sample(1000, 2000, rng=rng)
    assert samples.shape == (1000, 2000)
    assert np.allclose(np.mean(samples), 0.0, atol=5e-3)
    assert np.allclose(np.var(samples), tnorm_var(stddev, limit), rtol=5e-3)
    assert np.all(samples < limit)
    assert np.all(samples > -limit)

    # test with default rng
    samples = dist.sample(1000, 2000)
    assert samples.shape == (1000, 2000)


@pytest.mark.parametrize(
    "dist",
    [
        dists.TruncatedNormal(),
        dists.VarianceScaling(),
        dists.Glorot(),
        dists.He(),
    ],
)
def test_seeding(dist, seed):
    assert np.allclose(
        dist.sample(100, rng=np.random.RandomState(seed)),
        dist.sample(100, rng=np.random.RandomState(seed)),
    )