Example #1
def test_scale_firing_rates():
    inp = tf.keras.Input(shape=(1,))
    x = tf.keras.layers.ReLU()(inp)
    model = tf.keras.Model(inp, x)

    # scaling doesn't affect output at all for non-spiking neurons
    conv = converter.Converter(model, scale_firing_rates=5)
    assert conv.verify()

    # works with existing amplitude values
    neuron_type = nengo.RectifiedLinear(amplitude=2)
    conv = converter.Converter(
        model,
        scale_firing_rates=5,
        swap_activations={nengo.RectifiedLinear(): neuron_type},
    )
    assert neuron_type.amplitude == 2
    assert conv.net.ensembles[0].neuron_type.amplitude == 2 / 5

    # warning when applying scaling to non-amplitude neuron type
    inp = tf.keras.Input(shape=(1,))
    x = tf.keras.layers.Activation(tf.nn.sigmoid)(inp)
    model = tf.keras.Model(inp, x)

    with pytest.warns(UserWarning, match="does not support amplitude"):
        conv = converter.Converter(model, scale_firing_rates=5)

    with pytest.raises(ValueError, match="does not match output"):
        conv.verify()
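For context on what this test checks, a minimal sketch (assuming the public `nengo_dl.Converter` alias for the `converter.Converter` used in these tests): `scale_firing_rates` multiplies firing rates by the factor while dividing neuron amplitudes by it, so the time-averaged output is unchanged.

import nengo
import nengo_dl
import tensorflow as tf

inp = tf.keras.Input(shape=(1,))
x = tf.keras.layers.ReLU()(inp)
model = tf.keras.Model(inp, x)

conv = nengo_dl.Converter(
    model,
    swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},
    scale_firing_rates=5,
)
# amplitude is divided by the scale factor, so rate * amplitude is unchanged
print(conv.net.ensembles[0].neuron_type.amplitude)  # 0.2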
Example #2
def test_activation():
    inp = x = tf.keras.Input(shape=(4,))
    x = tf.keras.layers.Activation("relu")(x)
    x = tf.keras.layers.Activation(tf.nn.relu)(x)
    x = tf.keras.layers.Activation(tf.keras.activations.relu)(x)
    x = tf.keras.layers.Activation("sigmoid")(x)
    x = tf.keras.layers.Activation(tf.nn.sigmoid)(x)
    x = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(x)
    if version.parse(nengo.__version__) >= version.parse("3.1.0.dev0"):
        x = tf.keras.layers.Activation("tanh")(x)
        x = tf.keras.layers.Activation(tf.nn.tanh)(x)
        x = tf.keras.layers.Activation(tf.keras.activations.tanh)(x)

    _test_convert(inp, x)

    inp = x = tf.keras.Input(shape=(4,))
    x = tf.keras.layers.Activation(tf.keras.activations.elu)(x)

    model = tf.keras.Model(inp, x)

    with pytest.raises(TypeError, match="Unsupported activation type"):
        converter.Converter(model, allow_fallback=False)

    with pytest.warns(UserWarning, match="falling back to a TensorNode"):
        conv = converter.Converter(model, allow_fallback=True)
    assert conv.verify(training=False)
    assert conv.verify(training=True)
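A minimal sketch of the fallback path exercised above (again assuming the public `nengo_dl.Converter` alias): activations with no nengo neuron equivalent are wrapped in a TensorNode that runs the original TensorFlow op, rather than raising.

import nengo_dl
import tensorflow as tf

inp = tf.keras.Input(shape=(4,))
x = tf.keras.layers.Activation(tf.keras.activations.elu)(inp)
model = tf.keras.Model(inp, x)

# ELU has no nengo neuron equivalent, so it falls back to a TensorNode
# (this is what emits the UserWarning asserted in the test above)
conv = nengo_dl.Converter(model, allow_fallback=True)
assert conv.verify(training=False)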
Example #3
def test_unsupported_args():
    inp = x = tf.keras.Input(shape=(4, 1))
    x = tf.keras.layers.Conv1D(1, 1, kernel_regularizer=tf.keras.regularizers.l1(0.1))(
        x
    )

    model = tf.keras.Model(inp, x)

    with pytest.raises(
        TypeError,
        match="kernel_regularizer has value .* != None.*unless inference_only=True",
    ):
        converter.Converter(model, allow_fallback=False)

    with pytest.warns(
        UserWarning,
        match="kernel_regularizer has value .* != None.*unless inference_only=True",
    ):
        conv = converter.Converter(model, allow_fallback=True)
    assert conv.verify(training=False)
    assert conv.verify(training=True)

    inp = x = tf.keras.Input(shape=(4, 1))
    x = tf.keras.layers.Conv1D(1, 1, dilation_rate=(2,))(x)

    model = tf.keras.Model(inp, x)

    with pytest.raises(TypeError, match=r"dilation_rate has value \(2,\) != \(1,\)"):
        converter.Converter(model, allow_fallback=False)

    with pytest.warns(UserWarning, match=r"dilation_rate has value \(2,\) != \(1,\)"):
        conv = converter.Converter(model, allow_fallback=True)
    assert conv.verify(training=False)
    assert conv.verify(training=True)
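A sketch of the escape hatch named in the first error message (assuming `nengo_dl.Converter`): arguments that only affect training, such as regularizers, are accepted when the converted network is marked inference-only. Note this does not apply to `dilation_rate`, which is unsupported outright.

import nengo_dl
import tensorflow as tf

inp = tf.keras.Input(shape=(4, 1))
x = tf.keras.layers.Conv1D(1, 1, kernel_regularizer=tf.keras.regularizers.l1(0.1))(inp)
model = tf.keras.Model(inp, x)

# the regularizer only matters during training, so with inference_only=True
# the conversion succeeds without warnings or fallbacks
conv = nengo_dl.Converter(model, allow_fallback=False, inference_only=True)
assert conv.verify(training=False)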
Example #4
def test_batch_normalization(rng):
    inp = tf.keras.Input(shape=(4, 4, 3))
    out = []

    # TF<2.1 doesn't support axis!=-1 for fused batchnorm
    fused = (
        False if version.parse(tf.__version__) < version.parse("2.1.0rc0") else None
    )
    out.append(tf.keras.layers.BatchNormalization(axis=1, fused=fused)(inp))
    out.append(tf.keras.layers.BatchNormalization(axis=2, fused=fused)(inp))
    out.append(tf.keras.layers.BatchNormalization()(inp))
    out.append(tf.keras.layers.BatchNormalization(center=False, scale=False)(inp))

    model = tf.keras.Model(inputs=inp, outputs=out)

    # train it for a bit to initialize the moving averages
    model.compile(loss=tf.losses.mse, optimizer=tf.optimizers.SGD())
    model.fit(
        rng.uniform(size=(1024, 4, 4, 3)),
        [rng.uniform(size=(1024, 4, 4, 3))] * len(out),
        epochs=2,
    )

    inp_vals = [rng.uniform(size=(32, 4, 4, 3))]

    # test using tensornode fallback
    # TODO: there is some bug with using batchnormalization layers inside
    #  nengo_dl.Layers in general (unrelated to converting)
    # conv = convert.Converter(allow_fallback=True, inference_only=False)
    # with pytest.warns(UserWarning, match="falling back to nengo_dl.Layer"):
    #     net = conv.convert(model)
    #
    # assert conv.verify(model, net, training=False, inputs=inp_vals)
    # assert conv.verify(model, net, training=True, inputs=inp_vals)

    # test actually converting to nengo objects
    conv = converter.Converter(model, allow_fallback=False, inference_only=True)

    assert conv.verify(training=False, inputs=inp_vals, atol=1e-7)

    with pytest.raises(ValueError, match="number of trainable parameters"):
        # we don't expect the verification to pass for training=True, since we froze
        # the batch normalization in the nengo network (but not the keras model)
        conv.verify(training=True, inputs=inp_vals)

    # error if inference_only=False
    with pytest.raises(TypeError, match="unless inference_only=True"):
        converter.Converter(model, allow_fallback=False, inference_only=False)
Example #5
def test_swap_activations_key_never_used():
    """
    Ensure warnings are thrown properly when there is an unused swap activations key.
    """

    def relu(x):
        return tf.maximum(x, 0)

    def relu2(x):
        return tf.maximum(x, 0)

    inp = tf.keras.Input((1,))
    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)

    # Test that swap_activations are throwing warnings when not used
    with pytest.warns(UserWarning, match="no layers in the model with that activation"):
        conv = converter.Converter(
            tf.keras.Model(inp, out),
            allow_fallback=False,
            swap_activations={
                relu: nengo.RectifiedLinear(),
                relu2: nengo.RectifiedLinear(),
            },
        )
    assert conv.swap_activations.unused_keys() == {relu2}

    # Test that there is no warning if all keys are used
    inp = tf.keras.Input((1,))
    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)
    out = tf.keras.layers.Dense(units=10, activation=relu2)(out)
    with pytest.warns(None) as recwarns:
        conv = converter.Converter(
            tf.keras.Model(inp, out),
            allow_fallback=False,
            swap_activations={
                relu: nengo.RectifiedLinear(),
                relu2: nengo.RectifiedLinear(),
                nengo.RectifiedLinear(): nengo.SpikingRectifiedLinear(),
            },
        )
    assert not any(
        "no layers in the model with that activation" in str(w.message)
        for w in recwarns
    )
    assert len(conv.swap_activations.unused_keys()) == 0

    # check swap_activations dict functions
    assert len(conv.swap_activations) == 3
    assert set(conv.swap_activations.keys()) == {relu, relu2, nengo.RectifiedLinear()}
Example #6
def test_max_pool(rng):
    inp = x = tf.keras.Input(shape=(4, 4, 2))
    x = tf.keras.layers.MaxPool2D()(x)

    model = tf.keras.Model(inp, x)

    with pytest.warns(UserWarning, match="consider setting max_to_avg_pool=True"):
        conv = converter.Converter(model, max_to_avg_pool=False)

    assert conv.verify(training=False)
    assert conv.verify(training=True)

    # can convert to avg pool, but then we don't expect output to match
    conv = converter.Converter(model, max_to_avg_pool=True, allow_fallback=False)
    with pytest.raises(ValueError, match="does not match output"):
        conv.verify(training=False, inputs=[rng.uniform(size=(2, 4, 4, 2))])
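A sketch of the two options the test covers (assuming `nengo_dl.Converter`): keep max pooling as a TensorNode fallback (exact, but not a native nengo op), or swap it for average pooling (fully native, but a different function, so outputs will not match).

import nengo_dl
import tensorflow as tf

inp = tf.keras.Input(shape=(4, 4, 2))
x = tf.keras.layers.MaxPool2D()(inp)
model = tf.keras.Model(inp, x)

# exact: the max op stays a TensorNode inside the nengo network
exact = nengo_dl.Converter(model, max_to_avg_pool=False, allow_fallback=True)

# fully native nengo objects, but avg != max, so verification fails
approx = nengo_dl.Converter(model, max_to_avg_pool=True, allow_fallback=False)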
Example #7
def test_concatenate(rng):
    inp = [
        tf.keras.Input(shape=(1, 4)),
        tf.keras.Input(shape=(2, 4)),
        tf.keras.Input(shape=(3, 5)),
    ]
    x = tf.keras.layers.Concatenate(axis=1)(inp[:2])
    x = tf.keras.layers.Concatenate(axis=-1)([x, inp[2]])

    _test_convert(
        inp,
        x,
        inp_vals=[
            rng.uniform(size=(5, 1, 4)),
            rng.uniform(size=(5, 2, 4)),
            rng.uniform(size=(5, 3, 5)),
        ],
    )

    inp = [tf.keras.Input(shape=(1,)), tf.keras.Input(shape=(1,))]
    x = tf.keras.layers.Concatenate(axis=0)(inp)
    model = tf.keras.Model(inp, x)

    with pytest.raises(TypeError, match="concatenate along batch dimension"):
        converter.Converter(model, allow_fallback=False)
Example #8
def test_fallback(Simulator):
    inp = x = tf.keras.Input(shape=(2, 2))

    class MyLayer(tf.keras.layers.Layer):
        def build(self, input_shapes):
            super().build(input_shapes)
            self.kernel = self.add_weight(
                shape=(), initializer=tf.initializers.RandomUniform())

        def call(self, inputs):
            assert inputs.shape[1:] == (2, 2)
            return tf.reshape(inputs * tf.cast(self.kernel, inputs.dtype),
                              shape=(-1, 4))

    layer = MyLayer()
    x = layer(x)
    x = tf.keras.layers.Reshape((2, 2))(x)
    x = layer(x)
    x = tf.keras.layers.Reshape((2, 2))(x)
    x = layer(x)

    model = tf.keras.Model(inp, x)
    conv = converter.Converter(model, allow_fallback=True)

    with Simulator(conv.net) as sim:
        # check that weights are being shared correctly
        assert len(sim.keras_model.trainable_weights) == 1
        assert sim.keras_model.trainable_weights[0].shape == ()

    assert conv.verify(training=False)
    assert conv.verify(training=True)
Example #9
def test_densenet(Simulator, seed):
    tf.random.set_seed(seed)
    model = tf.keras.applications.densenet.DenseNet121(
        weights=None, include_top=False, input_shape=(112, 112, 3)
    )

    conv = converter.Converter(
        model, allow_fallback=False, max_to_avg_pool=True, inference_only=True
    )

    keras_params = 0
    for layer in model.layers:
        if not isinstance(layer, BatchNormalization):
            for w in layer._trainable_weights:
                keras_params += np.prod(w.shape)

    # note: we don't expect any of the verification checks to pass, due to the
    # max_to_avg_pool swap, so just checking that the network structure has been
    # recreated
    with conv.net:
        # undo the inference_only=True so that parameters will be marked as
        # trainable (so that the check below will work)
        config.configure_settings(inference_only=False)

    with Simulator(conv.net) as sim:
        assert keras_params == sum(
            np.prod(w.shape) for w in sim.keras_model.trainable_weights)
Example #10
def _test_convert(inputs, outputs, allow_fallback=False, inp_vals=None):
    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    conv = converter.Converter(model, allow_fallback=allow_fallback)

    assert conv.verify(training=False, inputs=inp_vals)
    assert conv.verify(training=True, inputs=inp_vals)
Example #11
def test_layer_dicts():
    inp0 = tf.keras.Input(shape=(1,))
    inp1 = tf.keras.Input(shape=(1,))
    add = tf.keras.layers.Add()([inp0, inp1])
    dense_node = tf.keras.layers.Dense(units=1)(add)
    dense_ens = tf.keras.layers.Dense(units=1, activation=tf.nn.relu)(dense_node)

    model = tf.keras.Model([inp0, inp1], [dense_node, dense_ens])

    conv = converter.Converter(model)
    assert len(conv.inputs) == 2
    assert len(conv.outputs) == 2
    assert len(conv.layers) == 5

    # inputs/outputs/layers referencing the same stuff
    assert isinstance(conv.outputs[dense_node], nengo.Probe)
    assert conv.outputs[dense_node].target is conv.layers[dense_node]
    assert conv.inputs[inp0] is conv.layers[inp0]

    # look up by tensor
    assert isinstance(conv.layers[dense_node], nengo.Node)
    assert isinstance(conv.layers[dense_ens], nengo.ensemble.Neurons)

    # look up by layer
    assert isinstance(conv.layers[model.layers[-2]], nengo.Node)
    assert isinstance(conv.layers[model.layers[-1]], nengo.ensemble.Neurons)

    # iterating over dict works as expected
    for i, tensor in enumerate(conv.layers):
        assert model.layers.index(tensor._keras_history.layer) == i

    # applying the same layer multiple times
    inp = tf.keras.Input(shape=(1,))
    layer = tf.keras.layers.ReLU()
    x0 = layer(inp)
    x1 = layer(inp)

    model = tf.keras.Model(inp, [x0, x1])

    conv = converter.Converter(model, split_shared_weights=True)

    with pytest.raises(KeyError, match="multiple output tensors"):
        assert conv.layers[layer]

    assert conv.outputs[x0].target is conv.layers[x0]
    assert conv.outputs[x1].target is conv.layers[x1]
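A sketch of how the `inputs`/`outputs` dicts checked above are used in practice (assuming the public `nengo_dl.Converter` and `nengo_dl.Simulator`): Keras tensors key the nengo Nodes and Probes used to feed and read data.

import numpy as np
import nengo_dl
import tensorflow as tf

inp = tf.keras.Input(shape=(1,))
out = tf.keras.layers.Dense(units=1)(inp)
model = tf.keras.Model(inp, out)

conv = nengo_dl.Converter(model)

with nengo_dl.Simulator(conv.net) as sim:
    # inputs are keyed by Keras tensor; data is (batch, timesteps, dims)
    data = sim.predict({conv.inputs[inp]: np.ones((3, 1, 1))})

print(data[conv.outputs[out]].shape)  # (3, 1, 1)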
Example #12
def test_sequential(seed):
    tf.random.set_seed(seed)

    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(32, input_shape=(4,)))
    model.add(tf.keras.layers.Dense(32))

    conv = converter.Converter(model, allow_fallback=False)
    assert conv.verify(training=False)
    assert conv.verify(training=True)
Example #13
def _test_convert(inputs, outputs, allow_fallback=False, inp_vals=None):
    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    conv = converter.Converter(model, allow_fallback=allow_fallback)

    # bug in TF2.3.0 when trying to run the verification function twice, see
    # https://github.com/tensorflow/tensorflow/issues/41239
    if version.parse(tf.__version__) < version.parse("2.3.0rc0"):
        assert conv.verify(training=False, inputs=inp_vals)
    assert conv.verify(training=True, inputs=inp_vals)
Example #14
def test_sequential(seed):
    tf.random.set_seed(seed)

    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(32, input_shape=(4,)))
    model.add(tf.keras.layers.Dense(32))

    conv = converter.Converter(model, allow_fallback=False)
    assert conv.verify(training=False)
    # TODO: not sure why this is slightly less accurate in graph mode
    assert conv.verify(training=True, atol=1e-8 if compat.eager_enabled() else 1e-7)
Example #15
def test_repeated_layers(seed):
    inp = x = tf.keras.layers.Input(shape=(4,))
    layer = tf.keras.layers.Dense(units=4)
    x = layer(x)
    x = layer(x)
    x = layer(x)

    model = tf.keras.Model(inputs=inp, outputs=x)

    conv = converter.Converter(model, allow_fallback=False, split_shared_weights=True)

    assert conv.verify(training=False)
    with pytest.raises(ValueError, match="number of trainable parameters"):
        # we don't expect the verification to pass for training=True, since we
        # split up the shared weights in the nengo network
        conv.verify(training=True)

    # error if split_shared_weights=False
    with pytest.raises(
        ValueError, match="not supported unless split_shared_weights=True"
    ):
        converter.Converter(model, split_shared_weights=False)
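A sketch of why training verification fails here (assuming `nengo_dl.Converter`): with `split_shared_weights=True`, each application of the shared Keras layer gets an independent copy of the weights in the nengo network, so gradients no longer accumulate into a single shared variable.

import nengo_dl
import tensorflow as tf

inp = tf.keras.Input(shape=(4,))
layer = tf.keras.layers.Dense(units=4)
x = layer(layer(inp))
model = tf.keras.Model(inp, x)

# the Keras model has one weight matrix; the converted network has two
# independent copies (one per application of `layer`)
conv = nengo_dl.Converter(model, allow_fallback=False, split_shared_weights=True)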
Example #16
def test_dense_fallback_bias():
    def relu(x):
        return tf.maximum(x, 0)

    inp = tf.keras.Input((1,))
    out = tf.keras.layers.Dense(units=10, activation=relu)(inp)

    _test_convert(inp, out, allow_fallback=True)

    # double check that extra biases aren't added when we _are_ using an Ensemble
    conv = converter.Converter(
        tf.keras.Model(inp, out),
        allow_fallback=False,
        swap_activations={relu: nengo.RectifiedLinear()},
    )
    assert conv.verify(training=True)
Example #17
def test_scale_firing_rates_cases(Simulator, scale_firing_rates, expected_rates):
    input_val = 100
    bias_val = 50
    n_steps = 100

    inp = tf.keras.Input(shape=(1,))
    x0 = tf.keras.layers.ReLU()(inp)
    x1 = tf.keras.layers.Dense(
        units=1,
        activation=tf.nn.relu,
        kernel_initializer=tf.initializers.constant([[1]]),
        bias_initializer=tf.initializers.constant([[bias_val]]),
    )(inp)
    model = tf.keras.Model(inp, [x0, x1])

    # convert indices to layers
    scale_firing_rates = (
        {model.layers[k]: v for k, v in scale_firing_rates.items()}
        if isinstance(scale_firing_rates, dict)
        else scale_firing_rates
    )

    conv = converter.Converter(
        model,
        swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},
        scale_firing_rates=scale_firing_rates,
    )

    with Simulator(conv.net) as sim:
        sim.run_steps(
            n_steps, data={conv.inputs[inp]: np.ones((1, n_steps, 1)) * input_val}
        )

        for i, p in enumerate(conv.net.probes):
            # spike heights are scaled down
            assert np.allclose(np.max(sim.data[p]), 1 / sim.dt / expected_rates[i])

            # number of spikes is scaled up
            assert np.allclose(
                np.count_nonzero(sim.data[p]),
                (input_val if i == 0 else input_val + bias_val)
                * expected_rates[i]
                * n_steps
                * sim.dt,
                atol=1,
            )
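The arithmetic behind the two assertions, as a worked sketch: scaling multiplies the firing rate and divides the spike amplitude, so the time-averaged output is preserved. Values here are illustrative, matching `input_val=100` and a scale factor of 5.

dt = 0.001
scale = 5      # scale_firing_rates
rate = 100.0   # firing rate driven by the input, in Hz

spike_height = (1 / scale) / dt   # each spike: amplitude / dt
spikes_per_sec = rate * scale     # rates are multiplied by the scale

# time-averaged output is unchanged by the scaling
assert abs(spike_height * spikes_per_sec * dt - rate) < 1e-9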
Example #18
def test_activation_swap(
    Simulator, keras_activation, nengo_activation, swap, rng, seed
):
    inp = x = tf.keras.Input(shape=(100,))
    x = tf.keras.layers.Activation(activation=keras_activation)(x)
    x = tf.keras.layers.Dense(
        units=100,
        activation=keras_activation,
        kernel_initializer=tf.initializers.constant(np.eye(100)),
    )(x)
    model = tf.keras.Model(inp, x)

    conv = converter.Converter(model, allow_fallback=False, swap_activations=swap)

    with nengo.Network() as net:
        net.config[nengo.Ensemble].neuron_type = nengo_activation
        net.config[nengo.Ensemble].gain = nengo.dists.Choice([1])
        net.config[nengo.Ensemble].bias = nengo.dists.Choice([0])
        net.config[nengo.Connection].synapse = None

        inp = nengo.Node(np.zeros(100))
        ens0 = nengo.Ensemble(100, 1)
        nengo.Connection(inp, ens0.neurons)

        ens1 = nengo.Ensemble(100, 1)
        nengo.Connection(ens0.neurons, ens1.neurons)

        p = nengo.Probe(ens1.neurons)

    inp_vals = rng.uniform(size=(20, 50, 100))

    # set the seed so that initial voltages will be the same
    net.seed = seed
    conv.net.seed = seed

    with Simulator(net) as sim0:
        data0 = sim0.predict(inp_vals)

    with Simulator(conv.net) as sim1:
        data1 = sim1.predict(inp_vals)

    assert np.allclose(data0[p], data1[conv.outputs[model.outputs[0]]])
Example #19
def test_synapse():
    inp = tf.keras.Input(shape=(1,))
    dense0 = tf.keras.layers.Dense(units=10, activation=tf.nn.relu)(inp)
    dense1 = tf.keras.layers.Dense(units=10, activation=None)(dense0)

    model = tf.keras.Model(inp, [dense0, dense1])

    conv = converter.Converter(model, synapse=0.1)

    for conn in conv.net.all_connections:
        if conn.pre is conv.layers[dense0]:
            # synapse set on outputs from neurons
            assert conn.synapse == nengo.Lowpass(0.1)
        else:
            # synapse not set on other connections
            assert conn.synapse is None

    # synapse set on neuron probe
    assert conv.outputs[dense0].synapse == nengo.Lowpass(0.1)
    # not set on non-neuron probe
    assert conv.outputs[dense1].synapse is None
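A sketch of the typical use of `synapse` (assuming `nengo_dl.Converter`): when activations are swapped to spiking neurons, a lowpass synapse on neuron outputs and probes smooths the spike trains, while connections between non-neuron objects stay unfiltered, as asserted above.

import nengo
import nengo_dl
import tensorflow as tf

inp = tf.keras.Input(shape=(1,))
x = tf.keras.layers.Dense(units=10, activation=tf.nn.relu)(inp)
model = tf.keras.Model(inp, x)

# equivalent to nengo.Lowpass(0.1) on every connection and probe that
# reads from neurons; other connections keep synapse=None
conv = nengo_dl.Converter(
    model,
    swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},
    synapse=0.1,
)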
Example #20
def test_input():
    inp = x = tf.keras.Input(shape=(None, None, 2))
    model = tf.keras.Model(inp, x)

    with pytest.raises(ValueError, match="must be fully specified"):
        converter.Converter(model)
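A sketch of the fix implied by the error: give every non-batch dimension a concrete size, since the converter must allocate a fixed number of nengo objects per layer. The shape below is an arbitrary illustrative choice.

import nengo_dl
import tensorflow as tf

# fully specified shape (only the batch dimension may be unknown)
inp = x = tf.keras.Input(shape=(28, 28, 2))
model = tf.keras.Model(inp, x)

conv = nengo_dl.Converter(model)  # succeeds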