Example #1
def test_dtype(Simulator, request, seed, bits):
    # Ensure dtype is set back to default after the test, even if it fails
    default = nengo.rc.get("precision", "bits")
    request.addfinalizer(lambda: nengo.rc.set("precision", "bits", default))

    float_dtype = np.dtype(getattr(np, "float%s" % bits))
    int_dtype = np.dtype(getattr(np, "int%s" % bits))

    with nengo.Network() as model:
        nengo_dl.configure_settings(dtype="float%s" % bits)

        u = nengo.Node([0.5, -0.4])
        a = nengo.Ensemble(10, 2)
        nengo.Connection(u, a)
        p = nengo.Probe(a)

    with Simulator(model) as sim:
        sim.step()

        # check that the builder has created signals of the correct dtype
        # (note that we may not necessarily use that dtype during simulation)
        for sig in sim.tensor_graph.signals:
            assert sig.dtype in (float_dtype,
                                 int_dtype), ("Signal '%s' wrong dtype" % sig)

        objs = (obj for obj in model.all_objects if sim.data[obj] is not None)
        for obj in objs:
            for x in (x for x in sim.data[obj] if isinstance(x, np.ndarray)):
                assert x.dtype == float_dtype, obj

        assert sim.data[p].dtype == float_dtype
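
For reference, a minimal standalone sketch of the dtype setting (assuming nengo and nengo_dl are installed; the small network below is hypothetical and not part of the test above):

import nengo
import nengo_dl

with nengo.Network() as net:
    # build the whole model in double precision instead of the float32 default
    nengo_dl.configure_settings(dtype="float64")

    inp = nengo.Node([0.5])
    ens = nengo.Ensemble(10, 1)
    nengo.Connection(inp, ens)
    probe = nengo.Probe(ens)

with nengo_dl.Simulator(net) as sim:
    sim.run_steps(5)
    print(sim.data[probe].dtype)  # expected to report float64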
Example #2
def build_SNN(image_size, config, spiking=True):
    with nengo.Network(seed=config["seed"]) as net:
        # remove some unnecessary features to speed up the training
        nengo_dl.configure_settings(stateful=False)

        # input node
        inp = nengo.Node(np.zeros(image_size[-1]))
        out = nengo.Node(size_in=101)

        if spiking:
            # lmu cell
            lmu = LMUCellSpike(units=800,
                               order=2,
                               theta=image_size[1],
                               input_d=image_size[-1])
            conn = nengo.Connection(inp, lmu.x, synapse=None)
        else:
            lmu = LMUCell(units=800,
                          order=2,
                          theta=image_size[1],
                          input_d=image_size[-1])
            conn = nengo.Connection(inp, lmu.x, synapse=None)

        # dense linear readout
        nengo.Connection(lmu.h,
                         out,
                         transform=nengo_dl.dists.Glorot(),
                         synapse=None)
        p = nengo.Probe(out)
    return net
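
A minimal sketch of the stateful=False setting on its own (hypothetical network; assumes the nengo_dl 3.x Keras-style API):

import numpy as np
import nengo
import nengo_dl

with nengo.Network() as net:
    # skip building the ops that carry simulator state between calls
    nengo_dl.configure_settings(stateful=False)

    inp = nengo.Node(np.zeros(2))
    out = nengo.Node(size_in=2)
    nengo.Connection(inp, out, synapse=None)
    probe = nengo.Probe(out)

with nengo_dl.Simulator(net, minibatch_size=4) as sim:
    # each predict call starts from the network's initial state
    sim.predict({inp: np.ones((4, 10, 2))})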
Example #3
def test_reuse_vars(Simulator):
    def my_func(_, x):
        # note: the control dependencies thing is due to some weird tensorflow
        # issue with creating variables inside while loops
        with tf.control_dependencies(None):
            w = tf.get_variable("weights", initializer=tf.constant(2.0))

        return x * tf.cast(w, x.dtype)

    with nengo.Network() as net:
        configure_settings(trainable=False)

        inp = nengo.Node([1])
        node = TensorNode(my_func, size_in=1)
        p = nengo.Probe(node)
        nengo.Connection(inp, node, synapse=None)

    with Simulator(net, unroll_simulation=5) as sim:
        sim.run_steps(5)
        assert np.allclose(sim.data[p], 2)

        with sim.tensor_graph.graph.as_default():
            vars = tf.trainable_variables()

        assert len(vars) == 1
        assert vars[0].get_shape() == ()
        assert sim.sess.run(vars[0]) == 2
Example #4
    def gen_init(self):
        model = nengo.Network()
        with model:
            model.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            nengo_dl.configure_settings(trainable=False)
        return model
Example #5
    def _build(self, num_classes, num_layers, num_filters, kernel_sizes):
        channel_each_layer = ([1] + num_filters)
        with nengo.Network() as net:
            # set some default parameters for the neurons that will make
            # the training progress more smoothly
            net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            neuron_type = nengo.LIF(amplitude=0.01)

            # make the network parameters trainable
            nengo_dl.configure_settings(trainable=True)

            inp = nengo.Node([0] * self.input_size[0] * self.input_size[1])

            x = nengo_dl.tensor_layer(inp, tf.layers.conv2d,
                    shape_in=(self.input_size[0], self.input_size[1], channel_each_layer[0]),
                    filters=num_filters[0], kernel_size=kernel_sizes[0], padding='same')
            x = nengo_dl.tensor_layer(x, neuron_type)

            for i in range(1, num_layers):
                x = nengo_dl.tensor_layer(x, tf.layers.conv2d,
                        shape_in=(self.input_size[0], self.input_size[1], channel_each_layer[i]),
                        filters=num_filters[i], kernel_size=kernel_sizes[i], padding='same')
                x = nengo_dl.tensor_layer(x, neuron_type)

            x = nengo_dl.tensor_layer(x, tf.layers.average_pooling2d,
                    shape_in=(self.input_size[0], self.input_size[1], channel_each_layer[-1]),
                    pool_size=(self.input_size[0], self.input_size[1]),
                    strides=(self.input_size[0], self.input_size[1]))

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=num_classes)
            out_p = nengo.Probe(x)
            out_p_filt = nengo.Probe(x, synapse=self.synapse)
            return inp, out_p, out_p_filt, net
Example #6
def test_training_arg(Simulator):
    class TrainingLayer(tf.keras.layers.Layer):
        def __init__(self, expected):
            super().__init__()

            self.expected = expected

        def call(self, inputs, training=None):
            tf.assert_equal(training, self.expected)
            return tf.reshape(inputs, (1, 1))

    with nengo.Network() as net:
        node = TensorNode(TrainingLayer(expected=False),
                          shape_in=None,
                          shape_out=(1, ))
        nengo.Probe(node)

    with Simulator(net) as sim:
        sim.predict(n_steps=10)

    with Simulator(net) as sim:
        sim.compile(optimizer=tf.optimizers.SGD(0), loss=tf.losses.mse)
        node.tensor_func.expected = True
        sim.fit(n_steps=10, y=np.zeros((1, 1, 1)))

    with net:
        configure_settings(learning_phase=True)
    with Simulator(net) as sim:
        sim.predict(n_steps=10)
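
A minimal sketch of what learning_phase=True does (hypothetical network with a Dropout layer; assumes the nengo_dl 3.x Layer API):

import numpy as np
import nengo
import nengo_dl
import tensorflow as tf

with nengo.Network() as net:
    # layers always see training=True, even during predict/run
    nengo_dl.configure_settings(learning_phase=True)

    inp = nengo.Node(np.ones(10))
    dropped = nengo_dl.Layer(tf.keras.layers.Dropout(0.5))(inp)
    probe = nengo.Probe(dropped)

with nengo_dl.Simulator(net) as sim:
    out = sim.predict(n_steps=1)
    # roughly half the entries should be zeroed, since dropout stays active
    print(out[probe])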
Example #7
def test_nengo_dl_noise(neuron_type, seed, plt, allclose):
    pytest.importorskip("tensorflow")

    install_dl_builders()

    net, rates, _ = rate_nengo_dl_net(neuron_type)
    n_noise = 1000  # number of noise samples per x point

    with net:
        nengo_dl.configure_settings(learning_phase=True)  # run with `training=True`

    with nengo_dl.Simulator(net, dt=net.dt, minibatch_size=n_noise, seed=seed) as sim:
        input_data = {net.stim: np.tile(net.x[None, None, :], (n_noise, 1, 1))}
        sim.step(data=input_data)
        y = sim.data[net.probe][:, 0, :]

    ymean = y.mean(axis=0)
    y25 = np.percentile(y, 25, axis=0)
    y75 = np.percentile(y, 75, axis=0)
    dy25 = y25 - rates["ref"]
    dy75 = y75 - rates["ref"]

    # exponential models roughly fitted to 25/75th percentiles
    x1mask = net.x > 1.5
    x1 = net.x[x1mask]
    if isinstance(neuron_type.nengo_dl_noise, AlphaRCNoise):
        exp_model = 0.7 + 2.8 * np.exp(-0.22 * (x1 - 1))
        atol = 0.12 * exp_model.max()
    elif isinstance(neuron_type.nengo_dl_noise, LowpassRCNoise):
        exp_model = 1.5 + 2.2 * np.exp(-0.22 * (x1 - 1))
        atol = 0.2 * exp_model.max()

    rtol = 0.2
    mu_atol = 0.6  # depends on n_noise and variance of noise

    # --- plots
    plt.subplot(211)
    plt.plot(net.x, rates["med"], "--", label="LIF(tau_ref += 0.5*dt)")
    plt.plot(net.x, ymean, label="nengo_dl")
    plt.plot(net.x, y25, ":", label="25th")
    plt.plot(net.x, y75, ":", label="75th")
    plt.plot(net.x, rates["ref"], "k--", label="LoihiLIF")
    plt.legend()

    plt.subplot(212)
    plt.plot(net.x, ymean - rates["ref"], "b", label="mean")
    plt.plot(net.x, mu_atol * np.ones_like(net.x), "b:")
    plt.plot(net.x, -mu_atol * np.ones_like(net.x), "b:")
    plt.plot(net.x, y25 - rates["ref"], ":", label="25th")
    plt.plot(net.x, y75 - rates["ref"], ":", label="75th")
    plt.plot(x1, exp_model, "k--")
    plt.plot(x1, exp_model * (1 + rtol) + atol, "k:")
    plt.plot(x1, exp_model * (1 - rtol) - atol, "k:")
    plt.plot(x1, -exp_model, "k--")
    plt.legend()

    assert ymean.shape == rates["ref"].shape
    assert allclose(ymean, rates["ref"], atol=mu_atol, record_rmse=False)
    assert allclose(dy25[x1mask], -exp_model, atol=atol, rtol=rtol, record_rmse=False)
    assert allclose(dy75[x1mask], exp_model, atol=atol, rtol=rtol, record_rmse=False)
Example #8
def test_save_load_params(Simulator, tmpdir):
    with nengo.Network(seed=0) as net:
        inp = nengo.Node([0])
        out = nengo.Node(size_in=1)
        ens = nengo.Ensemble(10, 1)
        nengo.Connection(inp, ens)
        nengo.Connection(ens, out)

        configure_settings(trainable=None)
        net.config[ens].trainable = False

    with Simulator(net) as sim:
        weights_var = [
            x[0] for x in sim.tensor_graph.base_vars.values()
            if x[0].get_shape() == (1, 10)
        ][0]
        enc_var = sim.tensor_graph.base_vars[sim.tensor_graph.sig_map[
            sim.model.sig[ens]["encoders"]].key][0]
        weights0, enc0 = sim.sess.run([weights_var, enc_var])
        sim.save_params(os.path.join(str(tmpdir), "train"))
        sim.save_params(os.path.join(str(tmpdir), "local"), include_local=True)

    with pytest.raises(SimulatorClosed):
        sim.save_params(None)
    with pytest.raises(SimulatorClosed):
        sim.load_params(None)

    with nengo.Network(seed=1) as net2:
        inp = nengo.Node([0])
        out = nengo.Node(size_in=1)
        ens = nengo.Ensemble(10, 1)
        nengo.Connection(inp, ens)
        nengo.Connection(ens, out)

        configure_settings(trainable=None)
        net2.config[ens].trainable = False

    with Simulator(net2) as sim:
        weights_var = [
            x[0] for x in sim.tensor_graph.base_vars.values()
            if x[0].get_shape() == (1, 10)
        ][0]
        enc_var = sim.tensor_graph.base_vars[sim.tensor_graph.sig_map[
            sim.model.sig[ens]["encoders"]].key][0]
        weights1, enc1 = sim.sess.run([weights_var, enc_var])
        assert not np.allclose(weights0, weights1)
        assert not np.allclose(enc0, enc1)

        sim.load_params(os.path.join(str(tmpdir), "train"))

        weights2, enc2 = sim.sess.run([weights_var, enc_var])
        assert np.allclose(weights0, weights2)
        assert not np.allclose(enc0, enc2)

        sim.load_params(os.path.join(str(tmpdir), "local"), include_local=True)

        weights3, enc3 = sim.sess.run([weights_var, enc_var])
        assert np.allclose(weights0, weights3)
        assert np.allclose(enc0, enc3)
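
The save/load round trip itself is just the two calls used above; a minimal sketch with hypothetical paths (assuming the same nengo_dl version as this test, where save_params/load_params accept an include_local flag):

with Simulator(net) as sim:
    sim.save_params("./params/trainable")                 # trainable parameters only
    sim.save_params("./params/all", include_local=True)   # also include local state

with Simulator(net2) as sim:
    sim.load_params("./params/trainable")                 # restore into another network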
Example #9
def test_merged_learning(Simulator, rule, weights, seed):
    # a slightly more complicated network with mergeable learning rules, to
    # make sure that works OK
    dimensions = 2
    with nengo.Network(seed=seed) as net:
        configure_settings(planner=partial(graph_optimizer.tree_planner,
                                           max_depth=10),
                           dtype="float64")

        a = nengo.Ensemble(3, dimensions, label="a")
        b = nengo.Ensemble(3, dimensions, label="b")
        c = nengo.Ensemble(5, dimensions, label="c")

        # for PES rules the post (error) shape also has to match for the rules
        # to be mergeable
        d = nengo.Ensemble(5 if rule == nengo.PES else 10,
                           dimensions,
                           label="d")

        conn0 = nengo.Connection(
            a,
            c,
            learning_rule_type=rule(learning_rate=0.1),
            solver=nengo.solvers.LstsqL2(weights=weights),
        )
        conn1 = nengo.Connection(
            b,
            d,
            learning_rule_type=rule(learning_rate=0.2),
            solver=nengo.solvers.LstsqL2(weights=weights),
        )

        p0 = nengo.Probe(conn0.learning_rule, "delta")
        p1 = nengo.Probe(conn1.learning_rule, "delta")

    with nengo.Simulator(net) as sim:
        sim.run_steps(10)

        canonical = (sim.data[p0], sim.data[p1])

    with Simulator(net, minibatch_size=2) as sim:
        build_type = {
            nengo.Voja: SimVoja,
            nengo.Oja: SimOja,
            nengo.BCM: SimBCM,
            nengo.PES: SimPES,
        }

        assert (len([
            x for x in sim.tensor_graph.plan if type(x[0]) == build_type[rule]
        ]) == 1)

        sim.run_steps(10)

        for i in range(sim.minibatch_size):
            assert np.allclose(sim.data[p0][i], canonical[0])
            assert np.allclose(sim.data[p1][i], canonical[1])
Example #10
def run_snn(model,
            x_test,
            y_test,
            params_load_path,
            iteration,
            timesteps=50,
            scale_firing_rates=1000,
            synapse=0.01,
            batch_size=16):
    """
    Run model in spiking setting
    :param batch_size: batch size
    :param model: model reference
    :param x_test: testing features
    :param y_test: testing labels
    :param params_load_path: path to load parameters
    :param iteration: number of current iteration
    :param timesteps: number of timesteps
    :param scale_firing_rates: firing rate scaling
    :param synapse: synaptic smoothing
    :return: accuracy, precision, recall, f1 and confusion matrix from the testing data
    """
    converter = nengo_dl.Converter(
        model,
        swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},
        scale_firing_rates=scale_firing_rates,
        synapse=synapse
    )  # create a Nengo converter object and swap all relu activations with spiking relu

    with converter.net:
        nengo_dl.configure_settings(stateful=False)

    output_layer = converter.outputs[model.get_layer(
        'output_layer')]  # output layer for simulator

    x_test_tiled = np.tile(x_test,
                           (1, timesteps, 1))  # tile test data to timesteps

    with nengo_dl.Simulator(converter.net) as simulator:
        simulator.load_params(params_load_path)

        # Get the statistics
        accuracy, precision, recall, f1, confusion_matrix = get_metrics(
            simulator, output_layer, x_test_tiled, y_test, batch_size,
            f'{iteration}. CNN (SNN conversion)')
        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1,
            'confusion_matrix': confusion_matrix
        }
Example #11
def run_profile(net, train=False, n_steps=150, do_profile=True, **kwargs):
    """
    Run profiler on a benchmark network.

    Parameters
    ----------
    net : :class:`~nengo:nengo.Network`
        The nengo Network to be profiled.
    train : bool, optional
        If True, profile the ``sim.train`` function. Otherwise, profile the
        ``sim.run`` function.
    n_steps : int, optional
        The number of timesteps to run the simulation.
    do_profile : bool, optional
        Whether or not to run profiling.

    Notes
    -----
    kwargs will be passed on to :class:`.Simulator`
    """

    with net:
        nengo_dl.configure_settings(trainable=None if train else False)

    with nengo_dl.Simulator(net, **kwargs) as sim:
        # note: we run a few times to try to eliminate startup overhead (only
        # the data from the last run will be kept)
        if train:
            opt = tf.train.GradientDescentOptimizer(0.001)
            x = np.random.randn(sim.minibatch_size, n_steps, net.inp.size_out)
            y = np.random.randn(sim.minibatch_size, n_steps, net.p.size_in)

            for _ in range(2):
                sim.train({net.inp: x}, {net.p: y},
                          optimizer=opt,
                          n_epochs=1,
                          profile=do_profile)

            start = time.time()
            sim.train({net.inp: x}, {net.p: y},
                      optimizer=opt,
                      n_epochs=1,
                      profile=do_profile)
            print("Execution time:", time.time() - start)

        else:
            for _ in range(2):
                sim.run_steps(n_steps, profile=do_profile)

            start = time.time()
            sim.run_steps(n_steps, profile=do_profile)
            print("Execution time:", time.time() - start)
Example #12
def run_profile(net, train=False, n_steps=150, do_profile=True, **kwargs):
    """
    Run profiler on a benchmark network.

    Parameters
    ----------
    net : `~nengo.Network`
        The nengo Network to be profiled.
    train : bool
        If True, profile the ``sim.train`` function. Otherwise, profile the
        ``sim.run`` function.
    n_steps : int
        The number of timesteps to run the simulation.
    do_profile : bool
        Whether or not to run profiling.

    Notes
    -----
    kwargs will be passed on to `.Simulator`
    """

    with net:
        nengo_dl.configure_settings(inference_only=not train)

    with nengo_dl.Simulator(net, **kwargs) as sim:
        # note: we run a few times to try to eliminate startup overhead (only
        # the data from the last run will be kept)
        if train:
            opt = tf.train.GradientDescentOptimizer(0.001)
            x = np.random.randn(sim.minibatch_size, n_steps, net.inp.size_out)
            y = np.random.randn(sim.minibatch_size, n_steps, net.p.size_in)

            for _ in range(2):
                sim.train({net.inp: x, net.p: y}, optimizer=opt, n_epochs=1,
                          profile=do_profile)

            start = time.time()
            sim.train({net.inp: x, net.p: y}, optimizer=opt, n_epochs=1,
                      profile=do_profile)
            exec_time = time.time() - start
            print("Execution time:", exec_time)

        else:
            for _ in range(2):
                sim.run_steps(n_steps, profile=do_profile)

            start = time.time()
            sim.run_steps(n_steps, profile=do_profile)
            exec_time = time.time() - start
            print("Execution time:", exec_time)

    return exec_time
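
A hypothetical call to the run_profile helper above; the net.inp and net.p attributes are required because the training branch reads net.inp.size_out and net.p.size_in (the names below are assumptions, not from the original):

import nengo

with nengo.Network() as net:
    net.inp = nengo.Node([0.0])
    ens = nengo.Ensemble(100, 1)
    nengo.Connection(net.inp, ens)
    net.p = nengo.Probe(ens)

# extra keyword arguments are forwarded to nengo_dl.Simulator
exec_time = run_profile(net, train=False, n_steps=50, minibatch_size=1)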
Example #13
def test_reuse_vars(Simulator, pytestconfig):
    class MyLayer(tf.keras.layers.Layer):
        def build(self, input_shape):
            self.w = self.add_weight(initializer=tf.initializers.constant(2.0),
                                     name="weights")

        def call(self, x):
            return x * tf.cast(self.w, x.dtype)

    with nengo.Network() as net:
        configure_settings(trainable=False)

        inp = nengo.Node([1])
        node = TensorNode(MyLayer(), shape_in=(1, ), pass_time=False)
        nengo.Connection(inp, node, synapse=None)

        node2 = Layer(
            tf.keras.layers.Dense(
                units=10,
                use_bias=False,
                kernel_initializer=tf.initializers.constant(3),
                dtype=pytestconfig.getoption("--dtype"),
            ))(inp)

        p = nengo.Probe(node)
        p2 = nengo.Probe(node2)

    with Simulator(net, unroll_simulation=5) as sim:
        sim.run_steps(5)
        assert np.allclose(sim.data[p], 2)
        assert np.allclose(sim.data[p2], 3)

        # note: when inference_only=True the weights will be marked as non-trainable
        if sim.tensor_graph.inference_only:
            assert len(sim.tensor_graph.saved_state) == 2
            assert len(sim.keras_model.non_trainable_variables) == 2
            assert len(sim.keras_model.trainable_variables) == 0
            vars = sim.keras_model.non_trainable_variables
        else:
            assert len(sim.tensor_graph.saved_state) == 2
            assert len(sim.keras_model.non_trainable_variables) == 0
            assert len(sim.keras_model.trainable_variables) == 2
            vars = sim.keras_model.trainable_variables

        assert len(vars) == 2
        assert vars[0].shape == ()
        assert tf.keras.backend.get_value(vars[0]) == 2
        assert vars[1].shape == (1, 10)
        assert np.allclose(tf.keras.backend.get_value(vars[1]), 3)
Example #14
def test_merged_learning(Simulator, rule, weights, seed):
    # a slightly more complicated network with mergeable learning rules, to
    # make sure that works OK
    dimensions = 2
    with nengo.Network(seed=seed) as net:
        configure_settings(
            planner=partial(graph_optimizer.tree_planner, max_depth=10))

        a = nengo.Ensemble(3, dimensions, label="a")
        b = nengo.Ensemble(3, dimensions, label="b")
        c = nengo.Ensemble(5, dimensions, label="c")

        # for PES rules the post (error) shape also has to match for the rules
        # to be mergeable
        d = nengo.Ensemble(5 if rule == nengo.PES else 10, dimensions,
                           label="d")

        conn0 = nengo.Connection(
            a, c, learning_rule_type=rule(),
            solver=nengo.solvers.LstsqL2(weights=weights))
        conn1 = nengo.Connection(
            b, d, learning_rule_type=rule(),
            solver=nengo.solvers.LstsqL2(weights=weights))

        p0 = nengo.Probe(conn0.learning_rule, "delta")
        p1 = nengo.Probe(conn1.learning_rule, "delta")

    with nengo.Simulator(net) as sim:
        sim.run_steps(10)

        canonical = (sim.data[p0], sim.data[p1])

    with Simulator(net) as sim:
        build_type = {nengo.Voja: SimVoja, nengo.Oja: SimOja,
                      nengo.BCM: SimBCM, nengo.PES: SimPES}

        assert len([x for x in sim.tensor_graph.plan
                    if type(x[0]) == build_type[rule]]) == 1

        sim.run_steps(10)

        assert np.allclose(sim.data[p0], canonical[0])
        assert np.allclose(sim.data[p1], canonical[1])
Example #15
    def build_network(neuron_type, ens_params):
        with nengo.Network() as net:
            nengo_dl.configure_settings(trainable=False)

            inp = nengo.Node([0] * 28 * 28)

            x = nengo_dl.tensor_layer(inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=32,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(26, 26, 32),
                                      filters=64,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(24, 24, 64),
                                      pool_size=2,
                                      strides=2)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(12, 12, 64),
                                      filters=128,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(10, 10, 128),
                                      pool_size=2,
                                      strides=2)

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)

        return net, inp, x
Example #16
def test_nengo_dl_neurons(neuron_type, inference_only, Simulator, plt, allclose):
    install_dl_builders()

    dt = 0.0007

    n = 256
    encoders = np.ones((n, 1))
    gain = np.zeros(n)
    if isinstance(neuron_type, nengo.SpikingRectifiedLinear):
        bias = np.linspace(0, 1001, n)
    else:
        bias = np.linspace(0, 30, n)

    with nengo.Network() as model:
        nengo_dl.configure_settings(inference_only=inference_only)

        a = nengo.Ensemble(
            n, 1, neuron_type=neuron_type, encoders=encoders, gain=gain, bias=bias
        )
        ap = nengo.Probe(a.neurons)

    t_final = 1.0
    with nengo_dl.Simulator(model, dt=dt) as dl_sim:
        dl_sim.run(t_final)

    with Simulator(model, dt=dt) as loihi_sim:
        loihi_sim.run(t_final)

    rates_dlsim = (dl_sim.data[ap] > 0).sum(axis=0) / t_final
    rates_loihisim = (loihi_sim.data[ap] > 0).sum(axis=0) / t_final

    zeros = np.zeros((1, gain.size))
    rates_ref = neuron_type.rates(zeros, gain, bias, dt=dt).squeeze(axis=0)
    plt.plot(bias, rates_loihisim, "r", label="loihi sim")
    plt.plot(bias, rates_dlsim, "b-.", label="dl sim")
    plt.plot(bias, rates_ref, "k--", label="rates_ref")
    plt.legend(loc="best")

    atol = 1.0 / t_final  # the fundamental unit for our rates
    assert rates_ref.shape == rates_dlsim.shape == rates_loihisim.shape
    assert allclose(rates_dlsim, rates_ref, atol=atol, rtol=0, xtol=1)
    assert allclose(rates_loihisim, rates_ref, atol=atol, rtol=0, xtol=1)
Example #17
def linear_net():
    """
    A simple network with an input, output, and no nonlinearity.
    """

    with nengo.Network() as net:
        a = nengo.Node([1])

        # note: in theory this would be nengo.Node(size_in=1), but due to
        # https://github.com/tensorflow/tensorflow/issues/23383
        # TensorFlow will hang
        b = nengo.Ensemble(1, 1, neuron_type=nengo.RectifiedLinear(),
                           gain=np.ones(1), bias=np.ones(1) * 1e-6)
        configure_settings(trainable=None)
        net.config[b.neurons].trainable = False
        nengo.Connection(a, b.neurons, synapse=None)

        p = nengo.Probe(b.neurons)

    return net, a, p
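
As this example suggests, trainable=None leaves the default trainability unchanged but exposes trainable as a configurable attribute, so individual objects can then be frozen. A minimal sketch of the pattern (hypothetical network):

import nengo
import nengo_dl

with nengo.Network() as net:
    # make trainable configurable without changing the default
    nengo_dl.configure_settings(trainable=None)

    ens = nengo.Ensemble(10, 1)
    out = nengo.Node(size_in=1)
    nengo.Connection(ens, out)

    # freeze this ensemble's parameters; everything else stays trainable
    net.config[ens].trainable = False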
Example #18
def build_SNN_simple(image_size, config):
    with nengo.Network(seed=config["seed"]) as net:
        # remove some unnecessary features to speed up the training
        nengo_dl.configure_settings(stateful=False)
        n_ensembles = 10000
        # input node

        inp = nengo.Node(np.zeros(image_size[-1]))
        u = nengo.networks.EnsembleArray(
            n_neurons=50,
            n_ensembles=n_ensembles,
            neuron_type=nengo.SpikingRectifiedLinear(),
        )
        nengo.Connection(inp, u.input, transform=np.zeros((n_ensembles, 512)))
        out = nengo.Node(size_in=101)
        nengo.Connection(u.output,
                         out,
                         transform=nengo_dl.dists.Glorot(),
                         synapse=None)
        p = nengo.Probe(out)
    return net
Example #19
def linear_net():
    """
    A simple network with an input, output, and no nonlinearity.
    """

    with nengo.Network() as net:
        a = nengo.Node([1])

        # note: in theory this would be nengo.Node(size_in=1), but due to
        # https://github.com/tensorflow/tensorflow/issues/23383
        # TensorFlow will hang
        b = nengo.Ensemble(1,
                           1,
                           neuron_type=nengo.RectifiedLinear(),
                           gain=np.ones(1),
                           bias=np.ones(1) * 1e-6)
        configure_settings(trainable=None)
        net.config[b.neurons].trainable = False
        nengo.Connection(a, b.neurons, synapse=None)

        p = nengo.Probe(b.neurons)

    return net, a, p
Example #20
def build_spaun(dimensions):
    vocab.sp_dim = dimensions
    cfg.mtr_arm_type = None

    cfg.set_seed(1)
    experiment.initialize('A', vis_data.get_image_ind,
                          vis_data.get_image_label,
                          cfg.mtr_est_digit_response_time, cfg.rng)
    vocab.initialize(experiment.num_learn_actions, cfg.rng)
    vocab.initialize_mtr_vocab(mtr_data.dimensions, mtr_data.sps)
    vocab.initialize_vis_vocab(vis_data.dimensions, vis_data.sps)

    with Spaun() as net:
        nengo_dl.configure_settings(
            trainable=False,
            simplifications=[
                graph_optimizer.remove_constant_copies,
                graph_optimizer.remove_unmodified_resets,
                # graph_optimizer.remove_zero_incs,
                graph_optimizer.remove_identity_muls
            ])

    return net
Example #21
def test_reuse_vars(Simulator):
    def my_func(_, x):
        # note: the control dependencies thing is due to some weird tensorflow
        # issue with creating variables inside while loops
        with tf.control_dependencies(None):
            w = tf.get_variable("weights", initializer=tf.constant(2.0))

        return x * tf.cast(w, x.dtype)

    with nengo.Network() as net:
        configure_settings(trainable=False)

        inp = nengo.Node([1])
        node = TensorNode(my_func, size_in=1)
        node2 = TensorNode(
            lambda _, x: tf.layers.dense(
                x, units=10, use_bias=False,
                kernel_initializer=tf.constant_initializer(3)),
            size_in=1, size_out=10)
        p = nengo.Probe(node)
        p2 = nengo.Probe(node2)
        nengo.Connection(inp, node, synapse=None)
        nengo.Connection(inp, node2, synapse=None)

    with Simulator(net, unroll_simulation=5) as sim:
        sim.run_steps(5)
        assert np.allclose(sim.data[p], 2)
        assert np.allclose(sim.data[p2], 3)

        with sim.tensor_graph.graph.as_default():
            vars = tf.trainable_variables()

        assert len(vars) == 2
        assert vars[0].get_shape() == ()
        assert sim.sess.run(vars[0]) == 2
        assert vars[1].get_shape() == (1, 10)
        assert np.allclose(sim.sess.run(vars[1]), 3)
Example #22
    def __init__(self,
                 param_file,
                 dim=256,
                 maze_id_dim=256,
                 n_sensors=36,
                 hidden_size=1024,
                 net_seed=13,
                 n_steps=30):
        self.net = nengo.Network(seed=net_seed)
        with self.net:
            # set some default parameters for the neurons that will make
            # the training progress more smoothly
            # net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            # net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            self.net.config[nengo.Connection].synapse = None
            neuron_type = nengo.LIF(amplitude=0.01)

            # this is an optimization to improve the training speed,
            # since we won't require stateful behaviour in this example
            nengo_dl.configure_settings(stateful=False)

            # the input node that will be used to feed in (context, location, goal)
            inp = nengo.Node(np.zeros((n_sensors * 4 + maze_id_dim, )))

            x = nengo_dl.Layer(tf.keras.layers.Dense(units=hidden_size))(inp)
            x = nengo_dl.Layer(neuron_type)(x)

            out = nengo_dl.Layer(tf.keras.layers.Dense(units=dim))(x)

            self.out_p = nengo.Probe(out, label="out_p")
            self.out_p_filt = nengo.Probe(out, synapse=0.1, label="out_p_filt")

        self.sim = nengo_dl.Simulator(self.net, minibatch_size=1)
        self.sim.load_params(param_file)
        self.sim.compile(loss={self.out_p_filt: mse_loss})
        self.n_steps = n_steps
Example #23

    with nengo.Network() as net:
        # set some default parameters for the neurons that will make
        # the training progress more smoothly
        net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
        net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
        neuron_type = nengo.LIF(amplitude=0.01)

        # we'll make all the nengo objects in the network
        # non-trainable. we could train them if we wanted, but they don't
        # add any representational power. note that this doesn't affect
        # the internal components of tensornodes, which will always be
        # trainable or non-trainable depending on the code written in
        # the tensornode.
        nengo_dl.configure_settings(trainable=False)

        # the input node that will be used to feed in input images
        inp = nengo.Node([0] * 28 * 28)

        # add the first convolutional layer
        x = nengo_dl.tensor_layer(
            inp, tf.layers.conv2d, shape_in=(28, 28, 1), filters=32,
            kernel_size=3)

        # apply the neural nonlinearity
        x = nengo_dl.tensor_layer(x, neuron_type)

        # add another convolutional layer
        x = nengo_dl.tensor_layer(
            x, tf.layers.conv2d, shape_in=(26, 26, 32), filters=64,
            kernel_size=3)
Example #24
    def _build_net(self):
        with nengo.Network() as net:
            # set some default parameters for the neurons that will make
            # the training progress more smoothly
            net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            neuron_type = nengo.LIF(amplitude=0.01)

            # we'll make all the nengo objects in the network
            # non-trainable. we could train them if we wanted, but they don't
            # add any representational power. note that this doesn't affect
            # the internal components of tensornodes, which will always be
            # trainable or non-trainable depending on the code written in
            # the tensornode.
            nengo_dl.configure_settings(trainable=False)

            # the input node that will be used to feed in input images
            self.inp = nengo.Node([0] * 28 * 28)

            # ENCODER
            # add the first convolutional layer
            x = nengo_dl.tensor_layer(self.inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=32,
                                      kernel_size=3)

            # apply the neural nonlinearity
            x = nengo_dl.tensor_layer(x, neuron_type)

            # add another convolutional layer
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(26, 26, 32),
                                      filters=64,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type)

            # downsample with a strided convolution (in place of a pooling layer)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(24, 24, 64),
                                      filters=64,
                                      kernel_size=2,
                                      strides=2)
            # x = nengo_dl.tensor_layer(x, tf.layers.average_pooling2d, shape_in=(24, 24, 64), pool_size=2, strides=2)

            # another convolutional layer
            # (W - Fw + 2P) / Sw + 1
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(12, 12, 64),
                                      filters=128,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type)

            # another strided-convolution downsampling stage
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(10, 10, 128),
                                      filters=128,
                                      kernel_size=2,
                                      strides=2)
            # x = nengo_dl.tensor_layer(x, tf.layers.average_pooling2d, shape_in=(10, 10, 128), pool_size=2, strides=2)

            # latent
            self.latent = x

            # DECODER
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d_transpose,
                                      shape_in=(5, 5, 128),
                                      filters=64,
                                      kernel_size=2,
                                      strides=2)
            x = nengo_dl.tensor_layer(x, neuron_type)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d_transpose,
                                      shape_in=(12, 12, 64),
                                      filters=64,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d_transpose,
                                      shape_in=(24, 24, 64),
                                      filters=32,
                                      kernel_size=2,
                                      strides=2)
            x = nengo_dl.tensor_layer(x, neuron_type)
            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d_transpose,
                                      shape_in=(26, 26, 32),
                                      filters=1,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, neuron_type)
            x = nengo_dl.tensor_layer(self.inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=1,
                                      kernel_size=3)

            # linear readout
            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)
            x = nengo_dl.tensor_layer(x, tf.identity)

            # x = x + out_stim
            self.out_stim = nengo.Node([0] * 10)
            self.stim_conns = attach_stim(self.out_stim, x,
                                          (np.arange(10), np.arange(15)))

            # we'll create two different output probes, one with a filter
            # (for when we're simulating the network over time and
            # accumulating spikes), and one without (for when we're
            # training the network using a rate-based approximation)
            self.out_p = nengo.Probe(x)
            self.out_p_filt = nengo.Probe(x, synapse=0.1)

        return net
Example #25
def LearningModel(neurons, dimensions, learning_rule, function_to_learn,
                  convolve, seed):
    global decoded

    with nengo.Network() as model:

        nengo_dl.configure_settings(stateful=False)

        model.inp = nengo.Node(
            # WhiteNoise( dist=Gaussian( 0, 0.05 ), seed=seed ),
            WhiteSignal(sim_time, high=5, seed=seed),
            size_out=dimensions[0])
        model.pre = nengo.Ensemble(neurons[0],
                                   dimensions=dimensions[0],
                                   seed=seed)
        model.post = nengo.Ensemble(neurons[1],
                                    dimensions=dimensions[1],
                                    seed=seed)
        model.ground_truth = nengo.Ensemble(neurons[2],
                                            dimensions=dimensions[2],
                                            seed=seed)

        nengo.Connection(model.inp, model.pre)

        if convolve:
            model.conv = nengo.networks.CircularConvolution(neurons[4],
                                                            dimensions[4],
                                                            seed=seed)
            nengo.Connection(model.inp[:int(dimensions[0] / 2)],
                             model.conv.input_a,
                             synapse=None)
            nengo.Connection(model.inp[int(dimensions[0] / 2):],
                             model.conv.input_b,
                             synapse=None)
            nengo.Connection(model.conv.output,
                             model.ground_truth,
                             synapse=None)
        else:
            nengo.Connection(model.inp,
                             model.ground_truth,
                             function=function_to_learn,
                             synapse=None)

        if learning_rule:
            model.error = nengo.Ensemble(neurons[3],
                                         dimensions=dimensions[3],
                                         seed=seed)

            if isinstance(learning_rule,
                          mPES) or (isinstance(learning_rule, PES)
                                    and not decoded):
                model.conn = nengo.Connection(model.pre.neurons,
                                              model.post.neurons,
                                              transform=np.random.random(
                                                  (model.post.n_neurons,
                                                   model.pre.n_neurons)),
                                              learning_rule_type=learning_rule)
            else:
                model.conn = nengo.Connection(
                    model.pre,
                    model.post,
                    function=lambda x: np.random.random(dimensions[1]),
                    learning_rule_type=learning_rule)
            nengo.Connection(model.error, model.conn.learning_rule)
            nengo.Connection(model.post, model.error)
            nengo.Connection(model.ground_truth, model.error, transform=-1)

            class cyclic_inhibit:
                def __init__(self, cycle_time):
                    self.out_inhibit = 0.0
                    self.cycle_time = cycle_time

                def step(self, t):
                    if t % self.cycle_time == 0:
                        if self.out_inhibit == 0.0:
                            self.out_inhibit = 2.0
                        else:
                            self.out_inhibit = 0.0

                    return self.out_inhibit

            model.inhib = nengo.Node(cyclic_inhibit(learn_block_time).step)
            nengo.Connection(model.inhib,
                             model.error.neurons,
                             transform=[[-1]] * model.error.n_neurons)
        else:
            model.conn = nengo.Connection(model.pre,
                                          model.post,
                                          function=function_to_learn)

        # -- probes
        model.pre_probe = nengo.Probe(model.pre, synapse=0.01)
        model.post_probe = nengo.Probe(model.post, synapse=0.01)
        model.ground_truth_probe = nengo.Probe(model.ground_truth,
                                               synapse=0.01)
        # function_learning_model.error_probe = nengo.Probe( function_learning_model.error, synapse=0.03 )

    return model
Example #26
# )

# print("\nData Generation Complete\n")

with nengo.Network(seed=args.net_seed) as net:
    # set some default parameters for the neurons that will make
    # the training progress more smoothly
    # net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
    # net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
    net.config[nengo.Connection].synapse = None
    net.config[nengo.Connection].transform = nengo_dl.dists.Glorot()
    neuron_type = nengo.LIF(amplitude=0.01)

    # this is an optimization to improve the training speed,
    # since we won't require stateful behaviour in this example
    nengo_dl.configure_settings(stateful=False)

    # the input node that will be used to feed in (context, location, goal)
    inp = nengo.Node(np.zeros((args.dim * 2 + args.maze_id_dim, )))

    if '.pkl' in args.param_file:
        print("Loading values from pickle file")

        policy_params = pickle.load(open(args.param_file, 'rb'))
        policy_inp_params = policy_params[0]
        policy_ens_params = policy_params[1]
        policy_out_params = policy_params[2]

        hidden_ens = nengo.Ensemble(
            n_neurons=args.hidden_size,
            dimensions=1,
def compare_backends(ctx, batch, n_neurons):
    load = ctx.obj["load"]
    reps = ctx.obj["reps"]
    device = ctx.obj["device"]
    save = ctx.obj["save"]

    bench_names = ["integrator", "cconv", "basal_ganglia", "pes"]
    n_range = [n_neurons]
    d_range = [64, 128, 192]
    neuron_types = [nengo.RectifiedLinear()]
    backends = ["nengo_dl", "nengo_ocl", "nengo"]
    sim_time = 5.0

    params = list(
        itertools.product(bench_names, n_range, d_range, neuron_types,
                          backends))

    if load:
        with open("compare_backends_%d_data_saved.pkl" % batch, "rb") as f:
            results = pickle.load(f)
    else:
        results = [{
            "times": [],
            "benchmark": bench,
            "n_neurons": n_neurons,
            "dimensions": dimensions,
            "neuron_type": neuron_type,
            "backend": backend
        } for bench, n_neurons, dimensions, neuron_type, backend in params]

    if reps > 0:
        for i, (bench, neurons_per_ens, dimensions, neuron_type,
                backend) in enumerate(params):
            print("%d/%d: %s %s %s %s %s" %
                  (i + 1, len(params), bench, neurons_per_ens, dimensions,
                   neuron_type, backend))

            net = getattr(benchmarks,
                          bench)(dimensions=dimensions,
                                 neurons_per_d=neurons_per_ens // dimensions,
                                 neuron_type=neuron_type)

            with net:
                nengo_dl.configure_settings(inference_only=True)

            if "nengo_dl" in backend:
                sim = nengo_dl.Simulator(net,
                                         unroll_simulation=25,
                                         minibatch_size=batch,
                                         device=device,
                                         progress_bar=False)
            elif backend == "nengo":
                sim = nengo.Simulator(net, progress_bar=False, optimize=True)
            elif backend == "nengo_ocl":
                import nengo_ocl
                sim = nengo_ocl.Simulator(net, progress_bar=False)

            with sim:
                # run once to eliminate startup overhead
                sim.run(0.1, progress_bar=False)

                for _ in range(reps):
                    start = time.time()
                    for b in range(1 if "nengo_dl" in backend else batch):
                        if b > 0:
                            sim.reset()
                        sim.run(sim_time, progress_bar=False)
                    results[i]["times"].append(
                        (time.time() - start) / sim_time)

            print("   ", min(results[i]["times"]), max(results[i]["times"]),
                  np.mean(results[i]["times"]))

        with open("compare_backends_%d_data.pkl" % batch, "wb") as f:
            pickle.dump(results, f)

    # plotting
    subplots = int(np.ceil(np.sqrt(len(bench_names))))
    f, axes = plt.subplots(subplots,
                           subplots,
                           sharey=True,
                           sharex=False,
                           figsize=(5 * subplots, 5 * subplots),
                           gridspec_kw={
                               "hspace": 0.2,
                               "top": 0.95,
                               "bottom": 0.05,
                               "left": 0.07,
                               "right": 0.95
                           })
    n_bars = len(d_range)
    neuron_type = nengo.RectifiedLinear()
    colours = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    y_max = 2.5 * batch
    for k, m in enumerate(bench_names):
        subplot_idx = (k // subplots, k % subplots)
        x_pos = np.arange(n_bars)
        for j, b in enumerate(backends):
            bottoms = np.zeros(n_bars)
            c = 0
            for n in n_range:
                data = np.asarray([
                    bootstrap_ci(t)
                    for t in filter_results(results,
                                            benchmark=m,
                                            neuron_type=neuron_type,
                                            n_neurons=n,
                                            backend=b)
                ])

                axes[subplot_idx].bar(
                    x_pos,
                    data[:, 0],
                    yerr=abs(np.transpose(data[:, 1:] - data[:, [0]])),
                    width=0.5,
                    bottom=bottoms,
                    color=colours[(j + 1) % len(backends)])

                for i, d in enumerate(data[:, 0]):
                    if d > y_max:
                        axes[subplot_idx].annotate("%.1f" % d,
                                                   (x_pos[i], y_max * 0.9),
                                                   ha="center",
                                                   va="center",
                                                   rotation="vertical",
                                                   color="white")

                bottoms += data[:, 0]
                c += 1
            x_pos += n_bars + 1

        axes[subplot_idx].set_title("%s" % m)
        if k == 0 and len(n_range) > 1:
            axes[subplot_idx].legend(["N=%d" % n for n in n_range])
        axes[subplot_idx].set_xticks(
            np.concatenate([
                np.arange(i * (n_bars + 1),
                          i * (n_bars + 1) + n_bars)
                for i in range(len(backends))
            ]))
        axes[subplot_idx].set_xticklabels(
            [t for _ in range(len(backends)) for t in d_range])
        for i, b in enumerate(backends):
            axes[subplot_idx].annotate(
                b, (((n_bars - 1) / 2 + (n_bars + 1) * i + 1) /
                    ((n_bars + 1) * len(backends)), -0.1),
                xycoords="axes fraction",
                ha="center")

        axes[subplot_idx].set_ylim([0, y_max])
        axes[subplot_idx].set_xlim([-1, (n_bars + 1) * len(backends) - 1])

        if k % subplots == 0:
            axes[subplot_idx].set_ylabel("real time / simulated time")

    if save:
        plt.savefig("compare_backends_%d.%s" % (batch, save))
def spiking_mnist(ctx, n_epochs):
    load = ctx.obj["load"]
    reps = ctx.obj["reps"]

    neuron_type = nengo.LIF(amplitude=0.01)
    ens_params = dict(max_rates=nengo.dists.Choice([100]),
                      intercepts=nengo.dists.Choice([0]))
    minibatch_size = 200
    n_steps = 50

    with nengo.Network() as net:
        nengo_dl.configure_settings(trainable=False)

        inp = nengo.Node([0] * 28 * 28)

        x = nengo_dl.tensor_layer(inp,
                                  tf.layers.conv2d,
                                  shape_in=(28, 28, 1),
                                  filters=32,
                                  kernel_size=3)
        x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

        x = nengo_dl.tensor_layer(x,
                                  tf.layers.conv2d,
                                  shape_in=(26, 26, 32),
                                  filters=64,
                                  kernel_size=3)
        x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

        x = nengo_dl.tensor_layer(x,
                                  tf.layers.average_pooling2d,
                                  shape_in=(24, 24, 64),
                                  pool_size=2,
                                  strides=2)

        x = nengo_dl.tensor_layer(x,
                                  tf.layers.conv2d,
                                  shape_in=(12, 12, 64),
                                  filters=128,
                                  kernel_size=3)
        x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

        x = nengo_dl.tensor_layer(x,
                                  tf.layers.average_pooling2d,
                                  shape_in=(10, 10, 128),
                                  pool_size=2,
                                  strides=2)

        out = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)

        out_p = nengo.Probe(out)
        spk_out_p = nengo.Probe(out, synapse=0.1)

    if load:
        with open("spiking_mnist_data_saved.pkl", "rb") as f:
            results = pickle.load(f)
    else:
        results = {"pre": [], "post": [], "spiking": []}

    urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz",
                "mnist.pkl.gz")
    with gzip.open("mnist.pkl.gz") as f:
        train_data, _, test_data = pickle.load(f, encoding="latin1")
    train_data = list(train_data)
    test_data = list(test_data)
    for data in (train_data, test_data):
        one_hot = np.zeros((data[0].shape[0], 10))
        one_hot[np.arange(data[0].shape[0]), data[1]] = 1
        data[1] = one_hot

    train_data = {
        inp: train_data[0][:, None, :],
        out_p: train_data[1][:, None, :]
    }
    test_data = {
        inp: test_data[0][:, None, :],
        out_p: test_data[1][:, None, :]
    }
    test_data_time = {
        inp: np.tile(test_data[inp], (1, n_steps, 1)),
        spk_out_p: np.tile(test_data[out_p], (1, n_steps, 1))
    }

    for _ in range(reps):
        # construct the simulator
        with nengo_dl.Simulator(net, minibatch_size=minibatch_size) as sim:

            def objective(x, y):
                return tf.nn.softmax_cross_entropy_with_logits_v2(logits=x,
                                                                  labels=y)

            opt = tf.train.RMSPropOptimizer(learning_rate=0.001)

            def classification_error(outputs, targets):
                return 100 * tf.reduce_mean(
                    tf.cast(
                        tf.not_equal(tf.argmax(outputs[:, -1], axis=-1),
                                     tf.argmax(targets[:, -1], axis=-1)),
                        tf.float32))

            # collect error before training
            results["pre"].append(
                sim.loss(test_data, {out_p: classification_error},
                         training=True))
            print("error before training: %.2f%%" % results["pre"][-1])

            # run training
            sim.train(train_data,
                      opt,
                      objective={out_p: objective},
                      n_epochs=n_epochs)

            # collect error after training
            results["post"].append(
                sim.loss(test_data, {out_p: classification_error},
                         training=True))
            print("error after training: %.2f%%" % results["post"][-1])

            # collect spiking error
            results["spiking"].append(
                sim.loss(test_data_time, {spk_out_p: classification_error},
                         training=False))
            print("spiking neuron error: %.2f%%" % results["spiking"][-1])

        with open("spiking_mnist_data.pkl", "wb") as f:
            pickle.dump(results, f)

    print("pre", bootstrap_ci(results["pre"]))
    print("post", bootstrap_ci(results["post"]))
    print("spiking", bootstrap_ci(results["spiking"]))
def compare_simplifications(ctx, dimensions):
    load = ctx.obj["load"]
    reps = ctx.obj["reps"]
    device = ctx.obj["device"]

    simplifications = [
        graph_optimizer.remove_constant_copies,
        graph_optimizer.remove_unmodified_resets,
        graph_optimizer.remove_zero_incs, graph_optimizer.remove_identity_muls
    ]

    params = list(itertools.product((False, True),
                                    repeat=len(simplifications)))

    if load:
        with open("compare_simplifications_data.pkl", "rb") as f:
            results = pickle.load(f)
    else:
        results = [
            dict([("times", [])] + [(s.__name__, p[i])
                                    for i, s in enumerate(simplifications)])
            for j, p in enumerate(params)
        ]

    # net = build_spaun(dimensions)
    net = benchmarks.random_network(dimensions, 32, nengo.RectifiedLinear(),
                                    1000, 100)
    with net:
        nengo_dl.configure_settings(inference_only=True)
    model = nengo.builder.Model(dt=0.001,
                                builder=nengo_dl.builder.NengoBuilder())
    model.build(net)

    if reps > 0:
        # pre-heat
        with nengo_dl.Simulator(None,
                                model=model,
                                device=device,
                                progress_bar=False) as sim:
            sim.run(reps, progress_bar=False)

        for j, p in enumerate(params):
            simps = []
            for i, s in enumerate(p):
                if s:
                    simps.append(simplifications[i])

            with net:
                nengo_dl.configure_settings(simplifications=simps)

            print("%d/%d" % (j + 1, len(params)), [x.__name__ for x in simps])

            with nengo_dl.Simulator(None,
                                    model=model,
                                    unroll_simulation=10,
                                    device=device,
                                    progress_bar=False) as sim:
                sim.run(0.1, progress_bar=False)

                sim_time = 1.0
                for _ in range(reps):
                    start = time.time()
                    sim.run(sim_time, progress_bar=False)
                    results[j]["times"].append(
                        (time.time() - start) / sim_time)

            print("   ", min(results[j]["times"]), max(results[j]["times"]),
                  np.mean(results[j]["times"]))

        with open("compare_simplifications_data.pkl", "wb") as f:
            pickle.dump(results, f)


def compare_optimizations(ctx, dimensions, unroll):
    load = ctx.obj["load"]
    reps = ctx.obj["reps"]
    device = ctx.obj["device"]
    save = ctx.obj["save"]

    # optimizations to apply (simplifications, planner, sorting, unroll)
    params = [
        (False, False, False, False),
        (False, False, False, True),
        (False, True, False, True),
        (False, True, True, True),
        (True, True, True, True),
    ]
    # params = list(itertools.product((False, True), repeat=4))

    if load:
        with open("compare_optimizations_%d_data_saved.pkl" % dimensions,
                  "rb") as f:
            results = pickle.load(f)
    else:
        results = [{
            "times": [],
            "simplifications": simp,
            "planner": plan,
            "sorting": sort,
            "unroll": unro
        } for simp, plan, sort, unro in params]

    if reps > 0:
        with benchmarks.spaun(dimensions) as net:
            nengo_dl.configure_settings(inference_only=True)
        model = nengo.builder.Model(dt=0.001,
                                    builder=nengo_dl.builder.NengoBuilder())
        model.build(net)

        print("neurons", net.n_neurons)
        print("ensembles", len(net.all_ensembles))
        print("connections", len(net.all_connections))

        for i, (simp, plan, sort, unro) in enumerate(params):
            print("%d/%d: %s %s %s %s" %
                  (i + 1, len(params), simp, plan, sort, unro))
            with net:
                config = dict()
                config["simplifications"] = ([
                    graph_optimizer.remove_constant_copies,
                    graph_optimizer.remove_unmodified_resets,
                    # graph_optimizer.remove_zero_incs,
                    graph_optimizer.remove_identity_muls
                ] if simp else [])

                config["planner"] = (graph_optimizer.tree_planner if plan else
                                     graph_optimizer.greedy_planner)

                config["sorter"] = (graph_optimizer.order_signals if sort else
                                    graph_optimizer.noop_order_signals)

                nengo_dl.configure_settings(**config)

            with nengo_dl.Simulator(None,
                                    model=model,
                                    unroll_simulation=unroll if unro else 1,
                                    device=device) as sim:
                sim.run(0.1)

                sim_time = 1.0

                for _ in range(reps):
                    start = time.time()
                    sim.run(sim_time)
                    results[i]["times"].append(
                        (time.time() - start) / sim_time)

            print("   ", min(results[i]["times"]), max(results[i]["times"]),
                  np.mean(results[i]["times"]))

        with open("compare_optimizations_%d_data.pkl" % dimensions, "wb") as f:
            pickle.dump(results, f)

    data = np.asarray([bootstrap_ci(x) for x in filter_results(results)])

    plt.figure()

    alphas = np.linspace(0.5, 1, len(results))
    colour = plt.rcParams["axes.prop_cycle"].by_key()["color"][0]
    for i in range(len(results)):
        plt.bar([i], [data[i, 0]],
                yerr=abs(data[i, 1:] - data[i, [0]])[:, None],
                log=True,
                alpha=alphas[i],
                color=colour)

    labels = []
    for r in results:
        lab = "merging\n"
        if r["unroll"]:
            lab += "unrolling\n"
        if r["planner"]:
            lab += "planning\n"
        if r["sorting"]:
            lab += "sorting\n"
        if r["simplifications"]:
            lab += "simplifications\n"

        labels.append(lab[:-1])
    plt.xticks(np.arange(len(results)), labels, rotation="vertical")
    plt.ylabel("real time / simulated time")

    plt.tight_layout()

    if save:
        plt.savefig("compare_optimizations_%d.%s" % (dimensions, save))
Example #31
0
def run_profile(
    net, train=False, n_steps=150, do_profile=True, reps=1, dtype="float32", **kwargs
):
    """
    Run profiler on a benchmark network.

    Parameters
    ----------
    net : `~nengo.Network`
        The nengo Network to be profiled.
    train : bool
        If True, profile the `.Simulator.fit` function. Otherwise, profile the
        `.Simulator.run` function.
    n_steps : int
        The number of timesteps to run the simulation.
    do_profile : bool
        Whether or not to run the profiler.
    reps : int
        Repeat the run this many times (only profile data from the last
        run will be kept).
    dtype : str
        Simulation dtype (e.g. "float32")

    Returns
    -------
    exec_time : float
        Time (in seconds) taken to run the benchmark, taking the minimum over
        ``reps``.

    Notes
    -----
    kwargs will be passed on to `.Simulator`
    """

    exec_time = 1e10
    n_batches = 1

    with net:
        nengo_dl.configure_settings(inference_only=not train, dtype=dtype)

    with nengo_dl.Simulator(net, **kwargs) as sim:
        if hasattr(net, "inp"):
            x = {
                net.inp: np.random.randn(
                    sim.minibatch_size * n_batches, n_steps, net.inp.size_out
                )
            }
        elif hasattr(net, "inp_a"):
            x = {
                net.inp_a: np.random.randn(
                    sim.minibatch_size * n_batches, n_steps, net.inp_a.size_out
                ),
                net.inp_b: np.random.randn(
                    sim.minibatch_size * n_batches, n_steps, net.inp_b.size_out
                ),
            }
        else:
            x = None

        if train:
            y = {
                net.p: np.random.randn(
                    sim.minibatch_size * n_batches, n_steps, net.p.size_in
                )
            }

            sim.compile(tf.optimizers.SGD(0.001), loss=tf.losses.mse)

            # run once to eliminate startup overhead
            start = timeit.default_timer()
            sim.fit(x, y, epochs=1, n_steps=n_steps)
            print("Warmup time:", timeit.default_timer() - start)

            for _ in range(reps):
                if do_profile:
                    tf.profiler.experimental.start("profile")
                start = timeit.default_timer()
                sim.fit(x, y, epochs=1, n_steps=n_steps)
                exec_time = min(timeit.default_timer() - start, exec_time)
                if do_profile:
                    tf.profiler.experimental.stop()

        else:
            # run once to eliminate startup overhead
            start = timeit.default_timer()
            sim.predict(x, n_steps=n_steps)
            print("Warmup time:", timeit.default_timer() - start)

            for _ in range(reps):
                if do_profile:
                    tf.profiler.experimental.start("profile")
                start = timeit.default_timer()
                sim.predict(x, n_steps=n_steps)
                exec_time = min(timeit.default_timer() - start, exec_time)
                if do_profile:
                    tf.profiler.experimental.stop()

    exec_time /= n_batches

    print("Execution time:", exec_time)

    return exec_time
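
# A minimal usage sketch (illustrative values; it assumes the ``benchmarks``
# module used in the examples above, and any extra keyword arguments are
# forwarded to ``nengo_dl.Simulator``):
bench_net = benchmarks.random_network(128, 32, nengo.RectifiedLinear(), 1000, 100)
run_profile(bench_net, train=False, n_steps=100, do_profile=False, reps=3,
            minibatch_size=1)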
Example #32
0
            def __init__(self, units, order, theta, input_d, **kwargs):
                super().__init__(**kwargs)

                # compute the A and B matrices according to the LMU's mathematical
                # derivation (see the paper for details)
                Q = np.arange(order, dtype=np.float64)
                R = (2 * Q + 1)[:, None] / theta
                j, i = np.meshgrid(Q, Q)

                A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
                B = (-1.0) ** Q[:, None] * R
                C = np.ones((1, order))
                D = np.zeros((1,))

                A, B, _, _, _ = cont2discrete((A, B, C, D), dt=1.0, method="zoh")

                with self:
                    nengo_dl.configure_settings(trainable=None)

                    # create objects corresponding to the x/u/m/h variables in LMU
                    self.x = nengo.Node(size_in=input_d)
                    self.u = nengo.Node(size_in=1)
                    self.m = nengo.Node(size_in=order)
                    self.h = nengo_dl.TensorNode(
                        tf.nn.tanh, shape_in=(units,), pass_time=False
                    )

                    # compute u_t
                    # note that setting synapse=0 (versus synapse=None) adds a
                    # one-timestep delay, so we can think of any connections with
                    # synapse=0 as representing value_{t-1}
                    nengo.Connection(
                        self.x, self.u, transform=np.ones((1, input_d)), synapse=None
                    )
                    nengo.Connection(
                        self.h, self.u, transform=np.zeros((1, units)), synapse=0
                    )
                    nengo.Connection(
                        self.m, self.u, transform=np.zeros((1, order)), synapse=0
                    )

                    # compute m_t
                    # in this implementation we'll make A and B non-trainable, but they
                    # could also be optimized in the same way as the other parameters
                    conn = nengo.Connection(self.m, self.m, transform=A, synapse=0)
                    self.config[conn].trainable = False
                    conn = nengo.Connection(self.u, self.m, transform=B, synapse=None)
                    self.config[conn].trainable = False

                    # compute h_t
                    nengo.Connection(
                        self.x,
                        self.h,
                        transform=np.zeros((units, input_d)),
                        synapse=None,
                    )
                    nengo.Connection(
                        self.h, self.h, transform=np.zeros((units, units)), synapse=0
                    )
                    nengo.Connection(
                        self.m,
                        self.h,
                        transform=nengo_dl.dists.Glorot(distribution="normal"),
                        synapse=None,
                    )
Example #33
0
def lmu(theta, input_d, native_nengo=False, dtype="float32"):
    """
    A network containing a single Legendre Memory Unit cell and dense readout.

    See [1]_ for more details.

    Parameters
    ----------
    theta : int
        Time window parameter for LMU.
    input_d : int
        Dimensionality of input signal.
    native_nengo : bool
        If True, build the LMU out of Nengo objects. Otherwise, build the LMU
        directly in TensorFlow, and use a `.TensorNode` to wrap the whole cell.
    dtype : str
        Float dtype to use for internal parameters of LMU when ``native_nengo=False``
        (``native_nengo=True`` will use the dtype of the Simulator).

    Returns
    -------
    net : `~nengo.Network`
        Benchmark network

    References
    ----------
    .. [1] Aaron R. Voelker, Ivana Kajić, and Chris Eliasmith. Legendre memory units:
       continuous-time representation in recurrent neural networks.
       In Advances in Neural Information Processing Systems. 2019.
       https://papers.nips.cc/paper/9689-legendre-memory-units-continuous-time-representation-in-recurrent-neural-networks.
    """
    if native_nengo:
        # building LMU cell directly out of Nengo objects

        class LMUCell(nengo.Network):
            """Implements an LMU cell as a Nengo network."""

            def __init__(self, units, order, theta, input_d, **kwargs):
                super().__init__(**kwargs)

                # compute the A and B matrices according to the LMU's mathematical
                # derivation (see the paper for details)
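                # concretely, for i, j = 0, ..., order - 1:
                #   A[i, j] = (2i + 1) / theta * (-1 if i < j else (-1)^(i - j + 1))
                #   B[i]    = (2i + 1) / theta * (-1)^i
                # these continuous-time matrices are then discretized below with a
                # zero-order hold (dt=1)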
                Q = np.arange(order, dtype=np.float64)
                R = (2 * Q + 1)[:, None] / theta
                j, i = np.meshgrid(Q, Q)

                A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
                B = (-1.0) ** Q[:, None] * R
                C = np.ones((1, order))
                D = np.zeros((1,))

                A, B, _, _, _ = cont2discrete((A, B, C, D), dt=1.0, method="zoh")

                with self:
                    nengo_dl.configure_settings(trainable=None)

                    # create objects corresponding to the x/u/m/h variables in LMU
                    self.x = nengo.Node(size_in=input_d)
                    self.u = nengo.Node(size_in=1)
                    self.m = nengo.Node(size_in=order)
                    self.h = nengo_dl.TensorNode(
                        tf.nn.tanh, shape_in=(units,), pass_time=False
                    )

                    # compute u_t
                    # note that setting synapse=0 (versus synapse=None) adds a
                    # one-timestep delay, so we can think of any connections with
                    # synapse=0 as representing value_{t-1}
                    nengo.Connection(
                        self.x, self.u, transform=np.ones((1, input_d)), synapse=None
                    )
                    nengo.Connection(
                        self.h, self.u, transform=np.zeros((1, units)), synapse=0
                    )
                    nengo.Connection(
                        self.m, self.u, transform=np.zeros((1, order)), synapse=0
                    )

                    # compute m_t
                    # in this implementation we'll make A and B non-trainable, but they
                    # could also be optimized in the same way as the other parameters
                    conn = nengo.Connection(self.m, self.m, transform=A, synapse=0)
                    self.config[conn].trainable = False
                    conn = nengo.Connection(self.u, self.m, transform=B, synapse=None)
                    self.config[conn].trainable = False

                    # compute h_t
                    nengo.Connection(
                        self.x,
                        self.h,
                        transform=np.zeros((units, input_d)),
                        synapse=None,
                    )
                    nengo.Connection(
                        self.h, self.h, transform=np.zeros((units, units)), synapse=0
                    )
                    nengo.Connection(
                        self.m,
                        self.h,
                        transform=nengo_dl.dists.Glorot(distribution="normal"),
                        synapse=None,
                    )

        with nengo.Network(seed=0) as net:
            # remove some unnecessary features to speed up the training
            nengo_dl.configure_settings(
                trainable=None, stateful=False, keep_history=False,
            )

            # input node
            net.inp = nengo.Node(np.zeros(input_d))

            # lmu cell
            lmu_cell = LMUCell(units=212, order=256, theta=theta, input_d=input_d)
            conn = nengo.Connection(net.inp, lmu_cell.x, synapse=None)
            net.config[conn].trainable = False

            # dense linear readout
            out = nengo.Node(size_in=10)
            nengo.Connection(
                lmu_cell.h, out, transform=nengo_dl.dists.Glorot(), synapse=None
            )

            # record output. note that we set keep_history=False above, so this will
            # only record the output on the last timestep (which is all we need
            # on this task)
            net.p = nengo.Probe(out)
    else:
        # putting everything in a tensornode

        # define LMUCell
        class LMUCell(tf.keras.layers.AbstractRNNCell):
            """Implement LMU as Keras RNN cell."""

            def __init__(self, units, order, theta, **kwargs):
                super().__init__(**kwargs)

                self.units = units
                self.order = order
                self.theta = theta

                Q = np.arange(order, dtype=np.float64)
                R = (2 * Q + 1)[:, None] / theta
                j, i = np.meshgrid(Q, Q)

                A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
                B = (-1.0) ** Q[:, None] * R
                C = np.ones((1, order))
                D = np.zeros((1,))

                self._A, self._B, _, _, _ = cont2discrete(
                    (A, B, C, D), dt=1.0, method="zoh"
                )

            @property
            def state_size(self):
                """Size of RNN state variables."""
                return self.units, self.order

            @property
            def output_size(self):
                """Size of cell output."""
                return self.units

            def build(self, input_shape):
                """Set up all the weight matrices used inside the cell."""

                super().build(input_shape)

                input_dim = input_shape[-1]
                self.input_encoders = self.add_weight(
                    shape=(input_dim, 1), initializer=tf.initializers.ones(),
                )
                self.hidden_encoders = self.add_weight(
                    shape=(self.units, 1), initializer=tf.initializers.zeros(),
                )
                self.memory_encoders = self.add_weight(
                    shape=(self.order, 1), initializer=tf.initializers.zeros(),
                )
                self.input_kernel = self.add_weight(
                    shape=(input_dim, self.units), initializer=tf.initializers.zeros(),
                )
                self.hidden_kernel = self.add_weight(
                    shape=(self.units, self.units), initializer=tf.initializers.zeros(),
                )
                self.memory_kernel = self.add_weight(
                    shape=(self.order, self.units),
                    initializer=tf.initializers.glorot_normal(),
                )
                self.AT = self.add_weight(
                    shape=(self.order, self.order),
                    initializer=tf.initializers.constant(self._A.T),
                    trainable=False,
                )
                self.BT = self.add_weight(
                    shape=(1, self.order),
                    initializer=tf.initializers.constant(self._B.T),
                    trainable=False,
                )

            def call(self, inputs, states):
                """Compute cell output and state updates."""

                h_prev, m_prev = states
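
                # the update equations implemented below (row-vector convention,
                # with E_* the encoders and W_* the kernels created in build()):
                #   u_t = x_t E_x + h_{t-1} E_h + m_{t-1} E_m
                #   m_t = m_{t-1} A^T + u_t B^T
                #   h_t = tanh(x_t W_x + h_{t-1} W_h + m_t W_m)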

                # compute u_t (the scalar signal written into the memory)
                u = (
                    tf.matmul(inputs, self.input_encoders)
                    + tf.matmul(h_prev, self.hidden_encoders)
                    + tf.matmul(m_prev, self.memory_encoders)
                )

                # compute the updated memory state vector (m_t)
                m = tf.matmul(m_prev, self.AT) + tf.matmul(u, self.BT)

                # compute the updated hidden state vector (h_t)
                h = tf.nn.tanh(
                    tf.matmul(inputs, self.input_kernel)
                    + tf.matmul(h_prev, self.hidden_kernel)
                    + tf.matmul(m, self.memory_kernel)
                )

                return h, [h, m]

        with nengo.Network(seed=0) as net:
            # remove some unnecessary features to speed up the training
            # we could set use_loop=False as well here, but leaving it for parity
            # with native_nengo
            nengo_dl.configure_settings(stateful=False)

            net.inp = nengo.Node(np.zeros(theta))

            rnn = nengo_dl.Layer(
                tf.keras.layers.RNN(
                    LMUCell(units=212, order=256, theta=theta, dtype=dtype),
                    return_sequences=False,
                )
            )(net.inp, shape_in=(theta, input_d))

            out = nengo.Node(size_in=10)
            nengo.Connection(rnn, out, transform=nengo_dl.dists.Glorot(), synapse=None)

            net.p = nengo.Probe(out)

    return net
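
# For example (illustrative), the TensorNode-based variant for a 1-D input
# task with a 784-step memory window (e.g. sequential MNIST) could be built as:
lmu_net = lmu(theta=784, input_d=1, native_nengo=False)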
Example #34
0
def run_network(activation,
                params_file="./keras_to_loihi_params",
                n_steps=30,
                scale_firing_rates=1,
                synapse=None,
                n_test=100,
                n_plots=1,
                plot_idx=-1):
    # convert the keras model to a nengo network
    nengo_converter = nengo_dl.Converter(
        model,
        scale_firing_rates=scale_firing_rates,
        swap_activations={tf.nn.relu: activation},
        synapse=synapse,
    )

    print_neurons_type(nengo_converter)

    # get input/output objects
    nengo_input = nengo_converter.inputs[inp]
    nengo_output = nengo_converter.outputs[output]

    # add probes to layers to record activity
    with nengo_converter.net:
        probes = collections.OrderedDict([
            [L1_layer, nengo.Probe(nengo_converter.layers[L1])],
            [L2_layer, nengo.Probe(nengo_converter.layers[L2])],
            [L3_layer, nengo.Probe(nengo_converter.layers[L3])],
        ])

    # repeat inputs for some number of timesteps
    tiled_test_data = np.tile(test_data[:n_test], (1, n_steps, 1))

    # set some options to speed up simulation
    with nengo_converter.net:
        nengo_dl.configure_settings(stateful=False)

    # build network, load in trained weights, run inference on test images
    with nengo_dl.Simulator(nengo_converter.net,
                            minibatch_size=1,
                            progress_bar=False) as nengo_sim:
        nengo_sim.load_params(params_file)
        data = nengo_sim.predict({nengo_input: tiled_test_data})

    # compute accuracy on test data, using output of network on last timestep
    test_predictions = np.argmax(data[nengo_output][:, -1], axis=-1)
    correct = test_truth[:n_test, 0, 0]
    print("Test accuracy: %.2f%%" %
          (100 * np.mean(test_predictions == correct)))

    predicted = np.array(test_predictions, dtype=int)
    correct = np.array(correct, dtype=int)

    # Plot normalized confusion matrix
    plot_confusion_matrix(correct,
                          predicted,
                          classes=class_names,
                          normalize=True,
                          title='Normalized confusion matrix')
    plt.savefig(outdir + f'/{plot_idx}_confusion_matrix.jpg')

    # plot the results
    mean_rates = []
    for i in range(n_plots):
        plt.figure(figsize=(12, 6))

        plt.subplot(1, 2, 1)
        # TODO: add a plot of current input signal
        # plt.title("Input signal")
        # plt.axis("off")

        n_layers = len(probes)
        mean_rates_i = []
        for j, layer in enumerate(probes.keys()):
            probe = probes[layer]
            plt.subplot(n_layers, 3, (j * 3) + 2)
            plt.suptitle("Neural activities")

            outputs = data[probe][i]

            # look only at non-zero outputs
            nonzero = (outputs > 0).any(axis=0)
            outputs = outputs[:, nonzero] if sum(nonzero) > 0 else outputs

            # undo neuron amplitude to get real firing rates
            outputs /= nengo_converter.layers[
                layer].ensemble.neuron_type.amplitude

            rates = outputs.mean(axis=0)
            mean_rate = rates.mean()
            mean_rates_i.append(mean_rate)
            print('"%s" mean firing rate (example %d): %0.1f' %
                  (layer.name, i, mean_rate))

            if is_spiking_type(activation):
                outputs *= 0.001
                plt.ylabel("# of Spikes")
            else:
                plt.ylabel("Firing rates (Hz)")

            # plot outputs of first 100 neurons
            plt.plot(outputs[:, :100])

        mean_rates.append(mean_rates_i)

        plt.xlabel("Timestep")

        plt.subplot(1, 3, 3)
        plt.title("Output predictions")
        plt.plot(tf.nn.softmax(data[nengo_output][i]))
        plt.legend([str(j) for j in range(10)], loc="upper left")
        plt.xlabel("Timestep")
        plt.ylabel("Probability")

        plt.tight_layout()

    # take mean rates across all plotted examples
    mean_rates = np.array(mean_rates).mean(axis=0)

    return mean_rates
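
# An example invocation (illustrative values; it relies on the notebook-level
# globals used above, e.g. ``model`` and ``test_data``, and a trained parameter
# file at the default ``params_file`` path):
run_network(
    activation=nengo.SpikingRectifiedLinear(),
    scale_firing_rates=100,
    synapse=0.005,
)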