Example 1
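These excerpts come from NengoDL's benchmark test suite and assume module-level imports that are not shown. A sketch of the likely header (an assumption, inferred from the identifiers used below; `Simulator` and `pytestconfig` in Example 1 appear to be pytest fixtures supplied by the test harness):

import numpy as np
import pytest
import tensorflow as tf

import nengo
from nengo_dl import benchmarks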
def test_lmu(Simulator, native_nengo, pytestconfig):
    n_steps = 1000
    net = benchmarks.lmu(
        n_steps, 1, native_nengo=native_nengo, dtype=pytestconfig.getoption("--dtype")
    )

    # TODO: It would be good to optimize LMU performance as the NengoDL implementation
    # is a bit slower than the original TensorFlow implementation.

    # benchmarks.run_profile(
    #     net,
    #     train=True,
    #     n_steps=n_steps if native_nengo else 1,
    #     do_profile=False,
    #     minibatch_size=100,
    #     unroll_simulation=25 if native_nengo else 1,
    #     reps=5,
    # )

    with Simulator(net) as sim:
        # count the trainable parameters in the underlying Keras model
        n_trainable = sum(
            np.prod(w.shape.as_list()) for w in sim.keras_model.trainable_weights
        )
        assert n_trainable == 102017

    assert net.inp.size_out == (1 if native_nengo else n_steps)
    assert net.p.size_in == 10
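For reference, the trainable-parameter count checked above can be reproduced outside the pytest harness. A minimal standalone sketch, assuming NengoDL is installed and using the same `benchmarks.lmu` arguments as the excerpt:

import numpy as np
import nengo_dl
from nengo_dl import benchmarks

net = benchmarks.lmu(1000, 1, native_nengo=True)
with nengo_dl.Simulator(net) as sim:
    n_trainable = sum(
        np.prod(w.shape.as_list()) for w in sim.keras_model.trainable_weights
    )
print(n_trainable)  # 102017, per the assertion above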
Example 2
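This excerpt begins partway through a pytest parametrize list, so the decorator header is cut off. Reconstructed from the closing brackets and the function signature below, it would read roughly:

@pytest.mark.parametrize(
    "net, train, minibatch_size, min, max",
    [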
        (benchmarks.integrator(128, 32, nengo.LIF()), True, 64, 1.25, 1.45),
        (
            benchmarks.random_network(
                64,
                32,
                nengo.RectifiedLinear(),
                n_ensembles=20,
                connections_per_ensemble=5,
                seed=0,
            ),
            False,
            None,
            0.5,
            0.7,
        ),
        (benchmarks.lmu(1000, 1, native_nengo=True), True, 100, 1.05, 1.25),
    ],
)
def test_performance(net, train, minibatch_size, min, max):
    # performance is based on Azure NC6 VM
    # CPU: Intel Xeon E5-2690 v3 @ 2.60GHz
    # GPU: Nvidia Tesla K80
    # Python version: 3.6.10
    # TensorFlow GPU version: 2.1.0
    # Nengo version: 3.1.0
    # NengoDL version: 3.1.0

    time = benchmarks.run_profile(
        net,
        minibatch_size=minibatch_size,
        train=train,
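        # the excerpt ends here; a plausible completion (an assumption, using
        # only argument names that appear in Example 1's commented-out
        # run_profile call):
        n_steps=1000,
        do_profile=False,
    )

    # min and max presumably bound the measured runtime
    assert min < time < max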
Example 3
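As in Example 2, the decorator header is cut off; reconstructed from the function signature below (note the extra `eager` parameter), it would read roughly:

@pytest.mark.parametrize(
    "net, train, minibatch_size, eager, min, max",
    [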
        (
            benchmarks.random_network(
                64,
                32,
                nengo.RectifiedLinear(),
                n_ensembles=20,
                connections_per_ensemble=5,
                seed=0,
            ),
            False,
            None,
            True,
            0.5,
            0.7,
        ),
        (benchmarks.lmu(1000, 1, native_nengo=True), True, 100, True, 1.3, 1.5),
        (benchmarks.lmu(1000, 1, native_nengo=True), True, 100, False, 1.05, 1.25),
    ],
)
def test_performance(net, train, minibatch_size, eager, min, max):
    # performance is based on Azure NC6 VM
    # CPU: Intel Xeon E5-2690 v3 @ 2.60GHz
    # GPU: Nvidia Tesla K80
    # Python version: 3.6.10
    # TensorFlow GPU version: 2.3.0
    # Nengo version: 3.1.0
    # NengoDL version: 3.3.0

    if not eager:
        # benchmark the legacy graph-mode code path: disable eager execution
        # and revert to v1 control flow
        tf.compat.v1.disable_eager_execution()
        tf.compat.v1.disable_control_flow_v2()
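    # the excerpt ends here; the body presumably continues like the other
    # test_performance variants, e.g.:
    time = benchmarks.run_profile(
        net,
        minibatch_size=minibatch_size,
        train=train,
        do_profile=False,
    )
    assert min < time < max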
Example 4
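Here too the decorator header is cut off; reconstructed from the function signature below, it would read roughly:

@pytest.mark.parametrize(
    "net, train, minibatch_size, min, max",
    [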
        (benchmarks.integrator(128, 32, nengo.LIF()), True, 64, 0.95, 1.25),
        (
            benchmarks.random_network(
                64,
                32,
                nengo.RectifiedLinear(),
                n_ensembles=20,
                connections_per_ensemble=5,
                seed=0,
            ),
            False,
            None,
            0.35,
            0.55,
        ),
        (benchmarks.lmu(1000, 1, native_nengo=True), True, 100, 0.85, 1.15),
    ],
)
def test_performance(net, train, minibatch_size, min, max):
    # performance is based on ABR GPU server
    # CPU: Intel Xeon E5-1650 v3 @ 3.50GHz
    # GPU: GeForce GTX Titan X
    # Python version: 3.6.8
    # TensorFlow GPU version: 2.0.0
    # Nengo version: 3.1.0
    # NengoDL version: 3.1.0

    time = benchmarks.run_profile(
        net,
        minibatch_size=minibatch_size,
        train=train,
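        # the excerpt ends here; a plausible completion, as in Example 2:
        do_profile=False,
    )
    assert min < time < max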