def test_scale_firing_rates():
    inp = tf.keras.Input(shape=(1, ))
    x = tf.keras.layers.ReLU()(inp)
    model = tf.keras.Model(inp, x)

    # scaling doesn't affect output at all for non-spiking neurons
    conv = converter.Converter(model, scale_firing_rates=5)
    assert conv.verify()

    # works with existing amplitude values
    neuron_type = nengo.RectifiedLinear(amplitude=2)
    conv = converter.Converter(
        model,
        scale_firing_rates=5,
        swap_activations={nengo.RectifiedLinear(): neuron_type},
    )
    assert neuron_type.amplitude == 2
    assert conv.net.ensembles[0].neuron_type.amplitude == 2 / 5

    # warning when applying scaling to non-amplitude neuron type
    inp = tf.keras.Input(shape=(1, ))
    x = tf.keras.layers.Activation(tf.nn.sigmoid)(inp)
    model = tf.keras.Model(inp, x)

    with pytest.warns(UserWarning, match="does not support amplitude"):
        conv = converter.Converter(model, scale_firing_rates=5)

    with pytest.raises(ValueError, match="does not match output"):
        conv.verify()
def test_train_objective(Simulator, unroll, seed):
    minibatch_size = 1
    n_hidden = 20
    n_steps = 10

    with nengo.Network(seed=seed) as net:
        inp = nengo.Node([1])
        ens = nengo.Ensemble(n_hidden, 1, neuron_type=nengo.RectifiedLinear())
        nengo.Connection(inp, ens, synapse=0.01)
        p = nengo.Probe(ens)

    with Simulator(net,
                   minibatch_size=minibatch_size,
                   unroll_simulation=unroll,
                   seed=seed) as sim:
        x = np.ones((minibatch_size, n_steps, 1))
        y = np.zeros((minibatch_size, n_steps, 1))

        def obj(output, target):
            return tf.reduce_mean((output[:, -1] - 0.5 - target[:, -1])**2)

        sim.train({inp: x}, {p: y},
                  tf.train.MomentumOptimizer(1e-2, 0.9),
                  n_epochs=100,
                  objective=obj)

        sim.check_gradients([p])

        sim.run_steps(n_steps, input_feeds={inp: x})

        assert np.allclose(sim.data[p][:, -1], y[:, -1] + 0.5, atol=1e-3)
Example #3
def test_train_sparse(Simulator, seed):
    minibatch_size = 4
    n_hidden = 20

    with nengo.Network(seed=seed) as net:
        net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
        net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])
        net.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear()
        net.config[nengo.Connection].synapse = None

        inp = nengo.Node([0, 0, 0, 0, 0])
        ens = nengo.Ensemble(n_hidden, 1)
        out = nengo.Node(size_in=2)
        nengo.Connection(inp[[0, 2, 3]], ens.neurons, transform=dists.Glorot())
        nengo.Connection(ens.neurons, out, transform=dists.Glorot())

        p = nengo.Probe(out)

    with Simulator(net,
                   minibatch_size=minibatch_size,
                   unroll_simulation=1,
                   seed=seed) as sim:
        x = np.asarray([[[0, 0, 0, 0, 0]], [[0, 0, 1, 0, 0]],
                        [[1, 0, 0, 0, 0]], [[1, 0, 1, 0, 0]]])
        y = np.asarray([[[0, 1]], [[1, 0]], [[1, 0]], [[0, 1]]])

        sim.train({inp: x}, {p: y},
                  tf.train.MomentumOptimizer(0.1, 0.9, use_nesterov=True),
                  n_epochs=500)

        sim.step(input_feeds={inp: x})

        assert np.allclose(sim.data[p], y, atol=1e-3)
Example #4
def test_regular_spiking(Simulator, inference_only, seed):
    with nengo.Network() as net:
        config.configure_settings(inference_only=inference_only)

        inp = nengo.Node([1])
        ens0 = nengo.Ensemble(
            100,
            1,
            neuron_type=nengo.SpikingRectifiedLinear(amplitude=2),
            seed=seed)
        ens1 = nengo.Ensemble(
            100,
            1,
            neuron_type=nengo.RegularSpiking(nengo.RectifiedLinear(),
                                             amplitude=2),
            seed=seed,
        )

        nengo.Connection(inp, ens0)
        nengo.Connection(inp, ens1)

        p0 = nengo.Probe(ens0.neurons)
        p1 = nengo.Probe(ens1.neurons)

    with pytest.warns(None) as recwarns:
        with Simulator(net) as sim:
            sim.run_steps(50)

    assert np.allclose(sim.data[p0], sim.data[p1])
    # check that it is actually using the tensorflow implementation
    assert not any("native TensorFlow implementation" in str(w.message)
                   for w in recwarns)
Example #5
def test_config_basic():
    config = dict(n_neurons=400, radius=3, normalize_encoders=False)
    stim = stimuli(0).configure(**config)

    extra = dict(radius=4, neuron_type=nengo.RectifiedLinear())
    x = stim.decode(**extra)
    assert x._impl_kwargs == {**config, **extra}

    with nengo.Network() as model:
        x.make()

    ensembles = model.all_ensembles
    assert len(ensembles) == 1
    assert ensembles[0].n_neurons == 400
    assert ensembles[0].radius == 4  # extra takes precedence
    assert ensembles[0].normalize_encoders == False
    assert ensembles[0].neuron_type == nengo.RectifiedLinear()
Example #6
def performance_samples(device):  # pragma: no cover
    """
    Run a brief sample of the benchmarks to check overall performance.

    This is mainly used to quickly check that there haven't been any unexpected
    performance regressions.
    """

    # TODO: automatically run some basic performance tests during CI

    default_kwargs = {
        "n_steps": 1000,
        "device": device,
        "unroll_simulation": 25,
        "progress_bar": False,
        "do_profile": False
    }

    print("cconv + relu")
    net = cconv(128, 64, nengo.RectifiedLinear())
    run_profile(net, minibatch_size=64, **default_kwargs)

    print("cconv + lif")
    net = cconv(128, 64, nengo.LIF())
    run_profile(net, minibatch_size=64, **default_kwargs)

    print("integrator training + relu")
    net = integrator(128, 32, nengo.RectifiedLinear())
    run_profile(net, minibatch_size=64, train=True, **default_kwargs)

    print("integrator training + lif")
    net = integrator(128, 32, nengo.LIF())
    run_profile(net, minibatch_size=64, train=True, **default_kwargs)

    print("random")
    net = random_network(128,
                         64,
                         nengo.RectifiedLinear(),
                         n_ensembles=50,
                         connections_per_ensemble=5,
                         seed=0)
    run_profile(net, **default_kwargs)

    print("spaun")
    net = spaun(1)
    run_profile(net, **default_kwargs)
Example #7
    def __init__(self, net, max_rate=100):
        amp = 1 / max_rate
        net.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(amplitude=amp)
        net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rate])
        net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
        net.config[nengo.Connection].synapse = None
        self.net = net
        self.layers = []
        self.output_shapes = []
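The max_rates/amplitude pairing above is a common normalization trick: with amplitude = 1/max_rate, a RectifiedLinear neuron with intercept 0 and max rate max_rate outputs max(x, 0) on the represented value, i.e. it behaves like a plain ReLU with outputs in roughly [0, 1]. A minimal standalone sketch of that scaling (the variable names and values here are ours, not part of the snippet):

import numpy as np
import nengo

max_rate = 100
neuron = nengo.RectifiedLinear(amplitude=1 / max_rate)

# gain/bias chosen so the neuron crosses threshold at x=0 and fires at
# max_rate when x=1; with amplitude=1/max_rate the output is then just
# max(x, 0), matching a standard ReLU activation
gain, bias = neuron.gain_bias(np.array([max_rate]), np.array([0.0]))
x = np.linspace(-1, 1, 5)[:, None]
print(neuron.rates(x, gain, bias).squeeze())  # ~[0, 0, 0, 0.5, 1]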
Example #8
def profiling():
    """Run profiler on one of the benchmarks."""

    # note: in order for GPU profiling to work, you have to manually add
    # ...\CUDA\v8.0\extras\CUPTI\libx64 to your path
    net, p = pes(128, 32, nengo.RectifiedLinear())
    with nengo_dl.Simulator(net,
                            tensorboard=False,
                            unroll_simulation=50,
                            device="/gpu:0") as sim:
        sim.run_steps(150, profile=True)
Example #9
def test_run_profile(train, pytestconfig):
    net = benchmarks.integrator(3, 2, nengo.RectifiedLinear())

    benchmarks.run_profile(
        net,
        train=train,
        n_steps=10,
        do_profile=False,
        device=pytestconfig.getvalue("--device"),
        unroll_simulation=pytestconfig.getvalue("--unroll_simulation"),
        dtype=(tf.float32 if pytestconfig.getvalue("dtype") == "float32" else
               tf.float64))

    assert net.config[net].inference_only == (not train)
def test_tensor_layer(Simulator):
    with nengo.Network() as net:
        inp = nengo.Node(np.arange(12))

        # check that connection arguments work
        layer0 = tensor_layer(inp, tf.identity, transform=2)

        assert isinstance(layer0, TensorNode)
        p0 = nengo.Probe(layer0)

        # check that arguments are passed to layer function
        layer1 = tensor_layer(layer0,
                              lambda x, axis: tf.reduce_sum(x, axis=axis),
                              axis=1,
                              shape_in=(2, 6))
        assert layer1.size_out == 6
        p1 = nengo.Probe(layer1)

        # check that ensemble layers work
        layer2 = tensor_layer(layer1,
                              nengo.RectifiedLinear(),
                              gain=[1] * 6,
                              bias=[-20] * 6)
        assert isinstance(layer2, nengo.ensemble.Neurons)
        assert np.allclose(layer2.ensemble.gain, 1)
        assert np.allclose(layer2.ensemble.bias, -20)
        p2 = nengo.Probe(layer2)

        # check that size_in can be inferred from transform
        layer3 = tensor_layer(layer2, lambda x: x, transform=np.ones((1, 6)))
        assert layer3.size_in == 1

        # check that size_in can be inferred from shape_in
        layer4 = tensor_layer(layer3,
                              lambda x: x,
                              transform=nengo.dists.Uniform(-1, 1),
                              shape_in=(2, ))
        assert layer4.size_in == 2

    with Simulator(net, minibatch_size=2) as sim:
        sim.step()

    x = np.arange(12) * 2
    assert np.allclose(sim.data[p0], x)

    x = np.sum(np.reshape(x, (2, 6)), axis=0)
    assert np.allclose(sim.data[p1], x)

    x = np.maximum(x - 20, 0)
    assert np.allclose(sim.data[p2], x)
Example #11
    def inh_ens_config(self):
        """(Config) Defaults for inhibitory input ensemble creation."""
        cfg = nengo.Config(nengo.Ensemble, nengo.Connection)
        cfg[nengo.Ensemble].update({
            "neuron_type": nengo.RectifiedLinear(),
            "radius": 1,
            "intercepts": nengo.dists.Choice([0.1] * self.dimensions),
            "max_rates": nengo.dists.Choice([40]),
        })
        cfg[nengo.Connection].synapse = None
        return cfg
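For reference, a Config built this way is normally applied as a context manager, so the defaults only affect objects created inside it. A small self-contained sketch of that pattern (standalone, with a made-up config rather than the property above):

import nengo

cfg = nengo.Config(nengo.Ensemble)
cfg[nengo.Ensemble].neuron_type = nengo.RectifiedLinear()

with nengo.Network():
    with cfg:
        ens = nengo.Ensemble(40, dimensions=1)  # picks up the config default

assert ens.neuron_type == nengo.RectifiedLinear()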
def test_dense_fallback_bias():
    def relu(x):
        return tf.maximum(x, 0)

    inp = tf.keras.Input((1, ))
    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)

    _test_convert(inp, out, allow_fallback=True)

    # double check that extra biases aren't added when we _are_ using an Ensemble
    conv = converter.Converter(
        tf.keras.Model(inp, out),
        allow_fallback=False,
        swap_activations={relu: nengo.RectifiedLinear()},
    )
    assert conv.verify(training=True)
Example #13
    def _add_neuron_layer(self, layer):
        inputs = [self._get_input(layer)]
        neuron = layer["neuron"]
        ntype = neuron["type"]
        n = layer["outputs"]

        gain = 1.0
        bias = 0.0
        amplitude = 1.0
        if ntype == "ident":
            neuron_type = nengo.Direct()
        elif ntype == "relu":
            neuron_type = nengo.RectifiedLinear()
        elif ntype == "logistic":
            neuron_type = nengo.Sigmoid()
        elif ntype == "softlif":

            tau_ref, tau_rc, alpha, amp, sigma = [
                neuron["params"][k] for k in ["t", "r", "a", "m", "g"]
            ]
            lif_type = self.lif_type.lower()
            if lif_type == "lif":
                neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
            elif lif_type == "lifrate":
                neuron_type = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
            elif lif_type == "softlifrate":
                neuron_type = SoftLIFRate(sigma=sigma,
                                          tau_rc=tau_rc,
                                          tau_ref=tau_ref)
            else:
                raise KeyError("Unrecognized LIF type %r" % self.lif_type)
            gain = alpha
            bias = 1.0
            amplitude = amp
        else:
            raise NotImplementedError("Neuron type %r" % ntype)

        return self.add_neuron_layer(
            n,
            inputs=inputs,
            neuron_type=neuron_type,
            synapse=self.synapse,
            gain=gain,
            bias=bias,
            amplitude=amplitude,
            name=layer["name"],
        )
Example #14
def test_neurons_equality():
    with nengo.Network():
        neuron_type = nengo.LIF()
        ens0 = nengo.Ensemble(10, 1, neuron_type=neuron_type)
        assert ens0.neurons == ens0.neurons
        assert ens0.neurons != neuron_type

        ens1 = nengo.Ensemble(10, 1, neuron_type=neuron_type)
        assert ens0.neurons != ens1.neurons
        assert ens0.neuron_type == ens1.neuron_type

        # this is the only way you could ever have two neurons objects that are
        # equal but have different type
        old_neurons = ens1.neurons
        old_neuron_type = ens1.neuron_type
        ens1.neuron_type = nengo.RectifiedLinear()
        assert ens1.neurons == old_neurons
        assert ens1.neuron_type != old_neuron_type
Example #15
    def _add_neuron_layer(self, layer):
        neuron = layer['neuron']
        ntype = neuron['type']
        n = layer['outputs']

        e = nengo.Ensemble(n, 1, label='%s_neurons' % layer['name'])
        e.gain = np.ones(n)
        e.bias = np.zeros(n)

        transform = 1.
        if ntype == 'ident':
            e.neuron_type = nengo.Direct()
        elif ntype == 'relu':
            e.neuron_type = nengo.RectifiedLinear()
        elif ntype == 'logistic':
            e.neuron_type = nengo.Sigmoid()
        elif ntype == 'softlif':
            from .neurons import SoftLIFRate
            tau_ref, tau_rc, alpha, amp, sigma, noise = [
                neuron['params'][k] for k in ['t', 'r', 'a', 'm', 'g', 'n']]
            lif_type = self.lif_type.lower()
            if lif_type == 'lif':
                e.neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
            elif lif_type == 'lifrate':
                e.neuron_type = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
            elif lif_type == 'softlifrate':
                e.neuron_type = SoftLIFRate(
                    sigma=sigma, tau_rc=tau_rc, tau_ref=tau_ref)
            else:
                raise KeyError("Unrecognized LIF type %r" % self.lif_type)
            e.gain = alpha * np.ones(n)
            e.bias = np.ones(n)
            transform = amp
        else:
            raise NotImplementedError("Neuron type %r" % ntype)

        node = nengo.Node(size_in=n, label=layer['name'])
        nengo.Connection(self._get_input(layer), e.neurons, synapse=None)
        nengo.Connection(
            e.neurons, node, transform=transform, synapse=self.synapse)
        return node
def test_gradients(Simulator, unroll, seed):
    minibatch_size = 4

    with nengo.Network(seed=seed) as net:
        net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
        net.config[nengo.Ensemble].bias = nengo.dists.Uniform(-1, 1)

        inp = nengo.Node([0], label="inp")

        # sigmoid neurons
        ens = nengo.Ensemble(10, 1, neuron_type=nengo.Sigmoid())

        # normal decoded connection
        nengo.Connection(inp, ens)

        # recurrent connection
        nengo.Connection(ens, ens, transform=0.1)

        # rectified neurons
        ens2 = nengo.Ensemble(10, 2, neuron_type=nengo.RectifiedLinear())

        # neuron--neuron connection
        nengo.Connection(ens,
                         ens2,
                         transform=[[1], [1]],
                         solver=nengo.solvers.LstsqL2(weights=True))

        # sliced output, no synapse
        nengo.Connection(inp, ens2[0], synapse=None, transform=0.5)

        # sliced input, sliced output
        inp2 = nengo.Node([0, 0], label="inp2")
        nengo.Connection(inp2[0], ens2[1])

        nengo.Probe(ens)
        nengo.Probe(ens2)

    with Simulator(net,
                   unroll_simulation=unroll,
                   minibatch_size=minibatch_size) as sim:
        sim.check_gradients(atol=1e-4)
Example #17
def test_swap_activations_key_never_used():
    """
    Ensure warnings are thrown properly when there is an unused swap activations key.
    """

    def relu(x):
        return tf.maximum(x, 0)

    def relu2(x):
        return tf.maximum(x, 0)

    inp = tf.keras.Input((1,))
    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)

    # Test that swap_activations are throwing warnings when not used
    with pytest.warns(UserWarning, match="no layers in the model with that activation"):
        conv = converter.Converter(
            tf.keras.Model(inp, out),
            allow_fallback=False,
            swap_activations={
                relu: nengo.RectifiedLinear(),
                relu2: nengo.RectifiedLinear(),
            },
        )
    assert conv.swap_activations.unused_keys() == {relu2}

    # Test that there is no warning if all keys are used
    inp = tf.keras.Input((1,))
    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)
    out = tf.keras.layers.Dense(units=10, activation=relu2)(out)
    with pytest.warns(None) as recwarns:
        conv = converter.Converter(
            tf.keras.Model(inp, out),
            allow_fallback=False,
            swap_activations={
                relu: nengo.RectifiedLinear(),
                relu2: nengo.RectifiedLinear(),
                nengo.RectifiedLinear(): nengo.SpikingRectifiedLinear(),
            },
        )
    assert not any(
        "no layers in the model with that activation" in str(w.message)
        for w in recwarns
    )
    assert len(conv.swap_activations.unused_keys()) == 0

    # check swap_activations dict functions
    assert len(conv.swap_activations) == 3
    assert set(conv.swap_activations.keys()) == {relu, relu2, nengo.RectifiedLinear()}
Example #18
def test_multiple_objective(Simulator, seed):
    with nengo.Network(seed=seed) as net:
        net.config[nengo.Connection].synapse = None

        a = nengo.Node([0])

        # note: b is configured this way so that the output, and therefore
        # loss, will be equal to the input, so we can control it easily
        b = nengo.Ensemble(100,
                           1,
                           neuron_type=nengo.RectifiedLinear(),
                           gain=nengo.dists.Choice([1]),
                           bias=nengo.dists.Choice([0]),
                           encoders=nengo.dists.Choice([[1]]))

        c = nengo.Ensemble(10, 1)

        nengo.Connection(a, b)
        nengo.Connection(a, c)

        p_b = nengo.Probe(b)
        p_c = nengo.Probe(c)

    with Simulator(net, unroll_simulation=1) as sim:
        inputs = {a: np.ones((10, 1, 1))}
        targets = {p_b: np.zeros((10, 1, 1)), p_c: np.zeros((10, 1, 1))}
        objective = {p_b: lambda x, y: x, p_c: lambda x, y: x * 0}

        assert np.allclose(sim.loss(inputs, targets, objective), 1, atol=1e-3)

        b_bias = np.copy(sim.data[b].bias)
        c_bias = sim.data[c].bias
        sim.train(inputs,
                  targets,
                  tf.train.GradientDescentOptimizer(1.0),
                  objective=objective,
                  n_epochs=10)
        assert np.allclose(sim.data[c].bias, c_bias)
        assert not np.allclose(sim.data[b].bias, b_bias)
    def _add_neuron_layer(self, layer):
        inputs = [self._get_input(layer)]
        neuron = layer['neuron']
        ntype = neuron['type']
        n = layer['outputs']

        gain = 1.
        bias = 0.
        amplitude = 1.
        if ntype == 'ident':
            neuron_type = nengo.Direct()
        elif ntype == 'relu':
            neuron_type = nengo.RectifiedLinear()
        elif ntype == 'logistic':
            neuron_type = nengo.Sigmoid()
        elif ntype == 'softlif':
            from .neurons import SoftLIFRate
            tau_ref, tau_rc, alpha, amp, sigma, noise = [
                neuron['params'][k] for k in ['t', 'r', 'a', 'm', 'g', 'n']]
            lif_type = self.lif_type.lower()
            if lif_type == 'lif':
                neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
            elif lif_type == 'lifrate':
                neuron_type = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
            elif lif_type == 'softlifrate':
                neuron_type = SoftLIFRate(
                    sigma=sigma, tau_rc=tau_rc, tau_ref=tau_ref)
            else:
                raise KeyError("Unrecognized LIF type %r" % self.lif_type)
            gain = alpha
            bias = 1.
            amplitude = amp
        else:
            raise NotImplementedError("Neuron type %r" % ntype)

        return self.add_neuron_layer(
            n, inputs=inputs, neuron_type=neuron_type, synapse=self.synapse,
            gain=gain, bias=bias, amplitude=amplitude, name=layer['name'])
Example #20
def test_train_recurrent(Simulator, truncation, seed):
    batch_size = 100
    minibatch_size = 100
    n_hidden = 30
    n_steps = 10

    with nengo.Network(seed=seed) as net:
        inp = nengo.Node([0])
        ens = nengo.Ensemble(n_hidden,
                             1,
                             neuron_type=nengo.RectifiedLinear(),
                             gain=np.ones(n_hidden),
                             bias=np.linspace(-1, 1, n_hidden))
        out = nengo.Node(size_in=1)

        nengo.Connection(inp, ens, synapse=None)
        nengo.Connection(ens, ens, synapse=0)
        nengo.Connection(ens, out, synapse=None)

        p = nengo.Probe(out)

    with Simulator(net, minibatch_size=minibatch_size, seed=seed) as sim:
        x = np.outer(np.linspace(0, 1, batch_size),
                     np.ones(n_steps))[:, :, None]
        y = np.outer(np.linspace(0, 1, batch_size),
                     np.linspace(0, 1, n_steps))[:, :, None]

        sim.train({inp: x}, {p: y},
                  tf.train.RMSPropOptimizer(1e-3),
                  n_epochs=200,
                  truncation=truncation)

        sim.check_gradients(sim.tensor_graph.build_loss({p: "mse"}))

        sim.run_steps(n_steps, input_feeds={inp: x[:minibatch_size]})

    assert np.sqrt(np.mean(
        (sim.data[p] - y[:minibatch_size])**2)) < (0.1 if truncation else 0.05)
Example #21
def linear_net():
    """
    A simple network with an input, output, and no nonlinearity.
    """

    with nengo.Network() as net:
        a = nengo.Node([1])

        # note: in theory this would be nengo.Node(size_in=1), but due to
        # https://github.com/tensorflow/tensorflow/issues/23383
        # TensorFlow will hang
        b = nengo.Ensemble(1,
                           1,
                           neuron_type=nengo.RectifiedLinear(),
                           gain=np.ones(1),
                           bias=np.ones(1) * 1e-6)
        configure_settings(trainable=None)
        net.config[b.neurons].trainable = False
        nengo.Connection(a, b.neurons, synapse=None)

        p = nengo.Probe(b.neurons)

    return net, a, p
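A usage sketch for this factory (assuming the same imports as the snippet, plus nengo_dl for the Simulator); with gain 1 and a ~zero bias, the single ReLU neuron simply relays its input:

net, a, p = linear_net()

with nengo_dl.Simulator(net) as sim:
    sim.run_steps(10)

# the input node outputs 1, so the probed neuron output should be ~1
assert np.allclose(sim.data[p], 1.0, atol=1e-5)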
Example #22
def test_networks(benchmark):
    dimensions = 16
    neurons_per_d = 10
    neuron_type = nengo.RectifiedLinear()

    net = benchmark(dimensions, neurons_per_d, neuron_type)

    try:
        assert net.inp.size_out == dimensions
    except AttributeError:
        assert net.inp_a.size_out == dimensions
        assert net.inp_b.size_out == dimensions

    assert net.p.size_in == dimensions

    for ens in net.all_ensembles:
        assert ens.neuron_type == neuron_type
        if benchmark == benchmarks.cconv:
            # the cconv network divides the neurons between two ensemble
            # arrays
            assert ens.n_neurons == ens.dimensions * (neurons_per_d // 2)
        else:
            assert ens.n_neurons == ens.dimensions * neurons_per_d
    def build_network(neurons_per_d, seed):
        with nengo.Network(seed=seed) as net:
            net.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear()
            net.config[nengo.Ensemble].gain = nengo.dists.Uniform(0.5, 1)
            net.config[nengo.Ensemble].bias = nengo.dists.Uniform(-0.1, 0.1)
            net.config[nengo.Connection].synapse = None

            net.role_inp = nengo.Node(np.zeros(dims))
            net.fill_inp = nengo.Node(np.zeros(dims))
            net.cue_inp = nengo.Node(np.zeros(dims))

            # circular convolution network to combine roles/fillers
            cconv = nengo.networks.CircularConvolution(neurons_per_d, dims)
            nengo.Connection(net.role_inp, cconv.input_a)
            nengo.Connection(net.fill_inp, cconv.input_b)

            # memory network to store the role/filler pairs
            memory = nengo.Ensemble(neurons_per_d * dims, dims)
            tau = 0.01
            nengo.Connection(cconv.output,
                             memory,
                             transform=tau / t_int,
                             synapse=tau)
            nengo.Connection(memory, memory, transform=1, synapse=tau)

            # another circular convolution network to extract the cued filler
            ccorr = nengo.networks.CircularConvolution(neurons_per_d,
                                                       dims,
                                                       invert_b=True)
            nengo.Connection(memory, ccorr.input_a)
            nengo.Connection(net.cue_inp, ccorr.input_b)

            net.conv_probe = nengo.Probe(cconv.output, label="conv_probe")
            net.memory_probe = nengo.Probe(memory, label="memory_probe")
            net.output_probe = nengo.Probe(ccorr.output, label="output_probe")

        return net
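The memory population above is the standard NEF integrator construction: route the input in with transform=tau/t_int through a synapse tau, and recur the population with transform=1, which approximates d(memory)/dt = input/t_int. A minimal standalone sketch of the same principle (values here are ours):

import nengo

tau = 0.05  # recurrent synapse time constant

with nengo.Network(seed=0) as net:
    stim = nengo.Node(lambda t: 1.0 if t < 0.5 else 0.0)
    memory = nengo.Ensemble(200, dimensions=1)
    # input scaled by tau plus unity recurrence => neural integrator
    nengo.Connection(stim, memory, transform=tau, synapse=tau)
    nengo.Connection(memory, memory, transform=1, synapse=tau)
    probe = nengo.Probe(memory, synapse=0.01)

with nengo.Simulator(net) as sim:
    sim.run(1.0)

# memory ramps to ~0.5 while the input is on, then roughly holds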
Example #24
def test_tensor_layer(Simulator):
    with nengo.Network() as net:
        inp = nengo.Node(np.arange(12))

        layer0 = tensor_layer(inp, tf.identity, transform=2)

        assert isinstance(layer0, TensorNode)
        p0 = nengo.Probe(layer0)

        layer1 = tensor_layer(layer0,
                              lambda x, axis: tf.reduce_sum(x, axis=axis),
                              axis=1,
                              shape_in=(2, 6))
        assert layer1.size_out == 6
        p1 = nengo.Probe(layer1)

        layer2 = tensor_layer(layer1,
                              nengo.RectifiedLinear(),
                              gain=[1] * 6,
                              bias=[-20] * 6)
        assert isinstance(layer2, nengo.ensemble.Neurons)
        assert np.allclose(layer2.ensemble.gain, 1)
        assert np.allclose(layer2.ensemble.bias, -20)
        p2 = nengo.Probe(layer2)

    with Simulator(net, minibatch_size=2) as sim:
        sim.step()

    x = np.arange(12) * 2
    assert np.allclose(sim.data[p0], x)

    x = np.sum(np.reshape(x, (2, 6)), axis=0)
    assert np.allclose(sim.data[p1], x)

    x = np.maximum(x - 20, 0)
    assert np.allclose(sim.data[p2], x)
Example #25
    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))

        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)

        if p.n_data != -1:
            files = random.sample(files, p.n_data)

        inputs = []
        targets = []
        for f in files:
            print(f)
            times, imgs, targs = davis_track.load_data(
                f,
                dt=p.dt,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs.append(imgs)
            targets.append(targs[:, :2])

        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)

        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
        elif p.test_set == 'one':
            times, imgs, targs = davis_track.load_data(
                test_file,
                dt=p.dt_test,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation)
            inputs_test = imgs
            targets_test = targs[:, :2]
            inputs_train = inputs_all
            targets_train = targets_all

        if p.augment:
            inputs_train, targets_train = davis_track.augment(
                inputs_train,
                targets_train,
                separate_channels=p.separate_channels)

        if p.separate_channels:
            shape = (360 // p.merge, 240 // p.merge)
        else:
            shape = (180 // p.merge, 240 // p.merge)

        dimensions = shape[0] * shape[1]
        eval_points_train = inputs_train.reshape(-1, dimensions)
        eval_points_test = inputs_test.reshape(-1, dimensions)

        model = nengo.Network()
        with model:
            from nengo_extras.vision import Gabor, Mask
            encoders = Gabor().generate(p.n_neurons,
                                        (p.gabor_size, p.gabor_size))
            encoders = Mask(shape).populate(encoders, flatten=True)

            ens = nengo.Ensemble(
                n_neurons=p.n_neurons,
                dimensions=dimensions,
                encoders=encoders,
                neuron_type=nengo.RectifiedLinear(),
                intercepts=nengo.dists.CosineSimilarity(p.gabor_size**2 + 2))

            result = nengo.Node(None, size_in=targets_all.shape[1])

            c = nengo.Connection(
                ens,
                result,
                eval_points=eval_points_train,
                function=targets_train,
                solver=nengo.solvers.LstsqL2(reg=p.reg),
            )
        sim = nengo.Simulator(model)

        error_train = sim.data[c].solver_info['rmses']

        _, a_train = nengo.utils.ensemble.tuning_curves(
            ens, sim, inputs=eval_points_train)
        outputs_train = np.dot(a_train, sim.data[c].weights.T)
        rmse_train = np.sqrt(
            np.mean((targets_train - outputs_train)**2, axis=0))
        _, a_test = nengo.utils.ensemble.tuning_curves(ens,
                                                       sim,
                                                       inputs=eval_points_test)
        outputs_test = np.dot(a_test, sim.data[c].weights.T)
        filt = nengo.synapses.Lowpass(p.output_filter)
        outputs_test = filt.filt(outputs_test, dt=p.dt_test)
        targets_test = filt.filt(targets_test, dt=p.dt_test)
        rmse_test = np.sqrt(np.mean(
            (targets_test - outputs_test)**2, axis=0)) * p.merge

        if plt:
            plt.subplot(2, 1, 1)
            plt.plot(targets_train, ls='--')
            plt.plot(outputs_train)
            plt.title('train\nrmse=%1.4f,%1.4f' % tuple(rmse_train))

            plt.subplot(2, 1, 2)
            plt.plot(targets_test, ls='--')
            plt.plot(outputs_test)
            plt.title('test\nrmse=%1.4f,%1.4f' % tuple(rmse_test))

        return dict(
            rmse_train=rmse_train,
            rmse_test=rmse_test,
        )
    def __init__(self, kp=0, kd=0, neural=False, adapt=False, num_motors=4,
                 neuron_model=False, pes_learning_rate=1e-4):
        # TODO
        self.kp = kp
        self.kd = kd
        self.prev_time = time.time()
        self.output = np.zeros(num_motors)
        self.adapt = adapt
        self.pes_learning_rate = pes_learning_rate
        self.neuron_model = neuron_model

        if neural:
            model = nengo.Network(label="Adaptive Controller")
            tau_rc = 0.02  # TODO: check whether this is in ms or s
            tau_ref = 0.002
            if self.neuron_model == "RELU":
                cur_model = nengo.RectifiedLinear()
            elif self.neuron_model == "LIF":
                cur_model = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
            elif self.neuron_model == "LIFRate":
                cur_model = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
            else:
                # guard against `cur_model` being unbound below
                raise ValueError("Unrecognized neuron model %r" % self.neuron_model)

            def output_func(t, x):
                self.output = np.copy(x)

            def input_func_q(t, x):
                return self.q
            def input_func_dq(t, x):
                return self.dq
            def input_func_target(t, x):
                return self.target
            def input_func_d_target(t, x):
                return self.d_target

            with model:
                
                output = nengo.Node(output_func, size_in=num_motors, size_out=0)
                input_q = nengo.Node(input_func_q, size_in=num_motors, size_out=num_motors)
                input_dq = nengo.Node(input_func_dq, size_in=num_motors, size_out=num_motors)
                input_target = nengo.Node(input_func_target, size_in=num_motors, size_out=num_motors)
                input_d_target = nengo.Node(input_func_d_target, size_in=num_motors, size_out=num_motors)

                pes_learning_rate = 1e-4
                
                #Adaptive component
                if self.adapt:
                    adapt_ens = nengo.Ensemble(
                            n_neurons=1000, dimensions=num_motors,
                            radius=1.5,
                            neuron_type=cur_model)

                    learn_conn = nengo.Connection(
                            adapt_ens,
                            output,
                            learning_rule_type=nengo.PES(pes_learning_rate))

                for i in range(num_motors):
                    inverter = nengo.Ensemble(500, dimensions=2, radius=1.5, neuron_type=cur_model)
                    proportional = nengo.Ensemble(500, dimensions=1, radius=1.5, neuron_type=cur_model)
                    derivative = nengo.Ensemble(500, dimensions=1, radius=1.5, neuron_type=cur_model)
                    control_signal = nengo.Ensemble(500, dimensions=1, radius=1.5, neuron_type=cur_model)

                    # invert terms that will be subtracted
                    nengo.Connection(input_q[i], inverter[0], synapse=None, function=lambda x: -x)
                    nengo.Connection(input_dq[i], inverter[1], synapse=None, function=lambda x: -x)

                    # calculate proportional part
                    nengo.Connection(inverter[0], proportional, synapse=None, function=lambda x: x * kp)
                    nengo.Connection(input_target[i], proportional, synapse=None, function=lambda x: x * kp)

                    # calculate derivative part
                    nengo.Connection(inverter[1], derivative, synapse=None, function=lambda x: x * kd)
                    nengo.Connection(input_d_target[i], derivative, synapse=None, function=lambda x: x * kd)

                    # output
                    nengo.Connection(proportional, control_signal)
                    nengo.Connection(derivative, control_signal)
                    nengo.Connection(control_signal, output[i])
                    
                    if self.adapt:
                        # adapt connections
                        nengo.Connection(input_q[i], adapt_ens, function=lambda x: np.zeros(num_motors), synapse=None)
                        nengo.Connection(control_signal, learn_conn.learning_rule[i], transform=-1, synapse=None)
                    
                    # Nodes to access PID components. 
                    # PID_Ens.append({"inverter":inverter, 
                    #                 "proportional":proportional, 
                    #                 "derivative": derivative, #TODO: Remove all except control signal. 
                    #                 "control_signal": control_signal})

                     # control_signal_p = nengo.Probe(PID_Ens[0]["control_signal"], synapse=.01)

            self.sim = nengo.Simulator(model)
Example #27
def mnist(use_tensor_layer=True):
    """
    A network designed to stress-test tensor layers (based on mnist net).

    Parameters
    ----------
    use_tensor_layer : bool
        If True, use individual tensor_layers to build the network, as opposed
        to a single TensorNode containing all layers.

    Returns
    -------
    net : `nengo.Network`
        benchmark network
    """

    with nengo.Network() as net:
        # create node to feed in images
        net.inp = nengo.Node(np.ones(28 * 28))

        if use_tensor_layer:
            nengo_nl = nengo.RectifiedLinear()

            ensemble_params = dict(max_rates=nengo.dists.Choice([100]),
                                   intercepts=nengo.dists.Choice([0]))
            amplitude = 1
            synapse = None

            x = nengo_dl.tensor_layer(net.inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=32,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(26, 26, 32),
                                      transform=amplitude,
                                      filters=32,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(24, 24, 32),
                                      synapse=synapse,
                                      transform=amplitude,
                                      pool_size=2,
                                      strides=2)

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=128)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.dropout,
                                      rate=0.4,
                                      transform=amplitude)

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)
        else:
            nl = tf.nn.relu

            # def softlif_layer(x, sigma=1, tau_ref=0.002, tau_rc=0.02,
            #                   amplitude=1):
            #     # x -= 1
            #     z = tf.nn.softplus(x / sigma) * sigma
            #     z += 1e-10
            #     rates = amplitude / (tau_ref + tau_rc * tf.log1p(1 / z))
            #     return rates

            @nengo_dl.reshaped((28, 28, 1))
            def mnist_node(_, x):  # pragma: no cover
                x = tf.layers.conv2d(x,
                                     filters=32,
                                     kernel_size=3,
                                     activation=nl)
                x = tf.layers.conv2d(x,
                                     filters=32,
                                     kernel_size=3,
                                     activation=nl)
                x = tf.layers.average_pooling2d(x, pool_size=2, strides=2)
                x = tf.contrib.layers.flatten(x)
                x = tf.layers.dense(x, 128, activation=nl)
                x = tf.layers.dropout(x, rate=0.4)
                x = tf.layers.dense(x, 10)

                return x

            node = nengo_dl.TensorNode(mnist_node,
                                       size_in=28 * 28,
                                       size_out=10)
            x = node
            nengo.Connection(net.inp, node, synapse=None)

        net.p = nengo.Probe(x)

    return net
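A usage sketch for this benchmark (hedged; it assumes nengo_dl and the TF 1.x APIs the snippet targets are installed):

net = mnist(use_tensor_layer=True)

with nengo_dl.Simulator(net, minibatch_size=2) as sim:
    sim.step()

# the probe reports the 10-way logits for each minibatch item
assert sim.data[net.p].shape[-1] == 10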
    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))

        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)
        
        if p.n_data != -1:
            files = random.sample(files, p.n_data)
            
        inputs = []
        targets = []
        for f in files:
            times, imgs, targs = davis_tracking.load_data(
                f, dt=p.dt, decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation, merge=p.merge)
            inputs.append(imgs)
            targets.append(targs[:, :2])
                                
        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)
        
        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
            dt_test = p.dt*2
        elif p.test_set == 'one':
            times, imgs, targs = davis_tracking.load_data(
                test_file, dt=p.dt_test, decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation, merge=p.merge)
            inputs_test = imgs
            targets_test = targs[:, :2]
            inputs_train = inputs_all
            targets_train = targets_all
            dt_test = p.dt_test
            
        if p.augment:
            inputs_train, targets_train = davis_tracking.augment(
                inputs_train, targets_train,
                separate_channels=p.separate_channels)

        if p.separate_channels:
            shape = (2, 180//p.merge, 240//p.merge)
        else:
            shape = (1, 180//p.merge, 240//p.merge)
        
        dimensions = shape[0] * shape[1] * shape[2]

        if p.normalize:
            magnitude = np.linalg.norm(inputs_train.reshape(-1, dimensions), axis=1)
            inputs_train = inputs_train * (1.0 / magnitude[:, None, None])

            magnitude = np.linalg.norm(inputs_test.reshape(-1, dimensions), axis=1)
            inputs_test = inputs_test * (1.0 / magnitude[:, None, None])

        max_rate = 100
        amp = 1 / max_rate

        model = nengo.Network()
        with model:
            model.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(amplitude=amp)
            model.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rate])
            model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            model.config[nengo.Connection].synapse = None

            inp = nengo.Node(
                nengo.processes.PresentInput(inputs_test.reshape(-1, dimensions), dt_test),
                size_out=dimensions,
                )

            out = nengo.Node(None, size_in=2)
            
            if not p.split_spatial:
                # do a standard convnet
                conv1 = nengo.Convolution(
                    p.n_features_1, shape, channels_last=False,
                    strides=(p.stride_1, p.stride_1),
                    kernel_size=(p.kernel_size_1, p.kernel_size_1))
                layer1 = nengo.Ensemble(conv1.output_shape.size, dimensions=1)
                nengo.Connection(inp, layer1.neurons, transform=conv1)

                conv2 = nengo.Convolution(
                    p.n_features_2, conv1.output_shape, channels_last=False,
                    strides=(p.stride_2, p.stride_2),
                    kernel_size=(p.kernel_size_2, p.kernel_size_2))
                layer2 = nengo.Ensemble(conv2.output_shape.size, dimensions=1)
                nengo.Connection(layer1.neurons, layer2.neurons, transform=conv2)

                nengo.Connection(layer2.neurons, out, transform=nengo_dl.dists.Glorot())
            else:
                # do the weird spatially split convnet
                convnet = davis_tracking.ConvNet(nengo.Network())
                convnet.make_input_layer(
                    shape,
                    spatial_stride=(p.spatial_stride, p.spatial_stride),
                    spatial_size=(p.spatial_size, p.spatial_size))
                nengo.Connection(inp, convnet.input)
                convnet.make_middle_layer(
                    n_features=p.n_features_1, n_parallel=p.n_parallel, n_local=1,
                    kernel_stride=(p.stride_1, p.stride_1),
                    kernel_size=(p.kernel_size_1, p.kernel_size_1))
                convnet.make_middle_layer(
                    n_features=p.n_features_2, n_parallel=p.n_parallel, n_local=1,
                    kernel_stride=(p.stride_2, p.stride_2),
                    kernel_size=(p.kernel_size_2, p.kernel_size_2))
                convnet.make_output_layer(2)
                nengo.Connection(convnet.output, out)

            p_out = nengo.Probe(out)

        N = len(inputs_train)
        n_steps = int(np.ceil(N/p.minibatch_size))
        dl_train_data = {inp: np.resize(inputs_train, (p.minibatch_size, n_steps, dimensions)),
                         p_out: np.resize(targets_train, (p.minibatch_size, n_steps, 2))}
        N = len(inputs_test)
        n_steps = int(np.ceil(N/p.minibatch_size))
        dl_test_data = {inp: np.resize(inputs_test, (p.minibatch_size, n_steps, dimensions)),
                        p_out: np.resize(targets_test, (p.minibatch_size, n_steps, 2))}
        with nengo_dl.Simulator(model, minibatch_size=p.minibatch_size) as sim:
            #loss_pre = sim.loss(dl_test_data)

            if p.n_epochs > 0:
                sim.train(dl_train_data, tf.train.RMSPropOptimizer(learning_rate=p.learning_rate),
                          n_epochs=p.n_epochs)

            loss_post = sim.loss(dl_test_data)

            sim.run_steps(n_steps, data=dl_test_data)

        data = sim.data[p_out].reshape(-1, 2)[:len(targets_test)]

        rmse_test = np.sqrt(np.mean((targets_test - data)**2, axis=0)) * p.merge
        if plt:
            plt.plot(data * p.merge)
            plt.plot(targets_test * p.merge, ls='--')

        return dict(
            rmse_test=rmse_test,
            max_n_neurons=max(ens.n_neurons for ens in model.all_ensembles),
            test_targets=targets_test,
            test_output=data,
            test_loss=loss_post,
        )
Example #29
    assert net.inp.size_out == 28 * 28
    assert net.p.size_in == 10


def test_spaun():
    pytest.importorskip("_spaun")

    dimensions = 2

    net = benchmarks.spaun(dimensions=dimensions)
    assert net.mem.mb1_net.output.size_in == dimensions


@pytest.mark.parametrize(
    "dimensions, neurons_per_d, neuron_type, n_ensembles, n_connections",
    ((1, 10, nengo.RectifiedLinear(), 5, 3), (2, 4, nengo.LIF(), 10, 2)),
)
def test_random_network(
    dimensions, neurons_per_d, neuron_type, n_ensembles, n_connections
):
    net = benchmarks.random_network(
        dimensions, neurons_per_d, neuron_type, n_ensembles, n_connections
    )
    _test_random(
        net, dimensions, neurons_per_d, neuron_type, n_ensembles, n_connections
    )


def _test_random(
    net, dimensions, neurons_per_d, neuron_type, n_ensembles, n_connections
):
Example #30
def test_tensor_layer(Simulator):
    with nengo.Network() as net:
        inp = nengo.Node(np.arange(12))

        # check that connection arguments work
        layer0 = Layer(tf.identity)(inp, transform=2)

        assert isinstance(layer0, TensorNode)
        p0 = nengo.Probe(layer0)

        # check that arguments can be passed to layer function
        layer1 = Layer(
            partial(lambda x, axis: tf.reduce_sum(x, axis=axis),
                    axis=1))(layer0, shape_in=(2, 6))
        assert layer1.size_out == 6
        p1 = nengo.Probe(layer1)

        class TestFunc:
            def __init__(self, axis):
                self.axis = axis

            def __call__(self, x):
                return tf.reduce_sum(x, axis=self.axis)

        layer1b = Layer(TestFunc(axis=1))(layer0, shape_in=(2, 6))
        assert layer1b.size_out == 6

        # check that ensemble layers work
        layer2 = Layer(nengo.RectifiedLinear())(layer1,
                                                gain=[1] * 6,
                                                bias=[-20] * 6)
        assert isinstance(layer2, nengo.ensemble.Neurons)
        assert np.allclose(layer2.ensemble.gain, 1)
        assert np.allclose(layer2.ensemble.bias, -20)
        p2 = nengo.Probe(layer2)

        # check that size_in can be inferred from transform
        layer3 = Layer(lambda x: x)(layer2, transform=np.ones((1, 6)))
        assert layer3.size_in == 1

        # check that size_in can be inferred from shape_in
        layer4 = Layer(lambda x: x)(layer3,
                                    transform=nengo.dists.Uniform(-1, 1),
                                    shape_in=(2, ))
        assert layer4.size_in == 2

        # check that conn is marked non-trainable
        with nengo.Network():
            _, conn = Layer(tf.identity)(inp, return_conn=True)
        assert not net.config[conn].trainable

    with Simulator(net, minibatch_size=2) as sim:
        sim.step()

    x = np.arange(12) * 2
    assert np.allclose(sim.data[p0], x)

    x = np.sum(np.reshape(x, (2, 6)), axis=0)
    assert np.allclose(sim.data[p1], x)

    x = np.maximum(x - 20, 0)
    assert np.allclose(sim.data[p2], x)