# Imports assumed for these extracted tests; MeanPoolingAggregator is taken to
# come from StellarGraph's HinSAGE layers.
import numpy as np
import pytest
from tensorflow import keras

from stellargraph.layer.hinsage import MeanPoolingAggregator


def test_meanpool_agg_apply_no_bias():
    # By default, bias_initializer="zeros", so all bias terms are initialised to zeros.
    agg = MeanPoolingAggregator(2, act="linear", kernel_initializer="ones")
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Zeros"

    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))

    out = agg([inp1, inp2])

    # Check sizes
    assert agg.weight_dims == [1, 1]

    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])

    # Agg output:
    # neigh_agg = mean(relu(x2 · ones(2x2) + zeros(2)), axis=1)
    #   = mean([[4,4],[6,6]]) = [[5,5]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones(2x1)) = [[10]]

    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 10]]])

    assert expected == pytest.approx(actual)
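
# A minimal NumPy sketch of the arithmetic checked above, assuming the
# aggregator applies relu(x2 · W_pool + b_pool), averages over the neighbour
# axis, and then maps the self and pooled-neighbour parts through separate
# kernels of ones; the variable names here are illustrative, not the
# library's internals.
def test_meanpool_agg_manual_arithmetic_no_bias():
    x1 = np.array([[[1.0, 1.0]]])
    x2 = np.array([[[[2.0, 2.0], [3.0, 3.0]]]])

    # Hidden pooling layer: kernel of ones (2x2), zero bias, relu activation.
    hidden = np.maximum(x2 @ np.ones((2, 2)), 0.0)   # [[[[4, 4], [6, 6]]]]
    neigh_agg = hidden.mean(axis=2)                   # [[[5, 5]]]

    # Output kernels of ones map self (2 -> 1) and neighbour (2 -> 1) parts.
    from_self = x1 @ np.ones((2, 1))                  # [[[2]]]
    from_neigh = neigh_agg @ np.ones((2, 1))          # [[[10]]]

    manual = np.concatenate([from_self, from_neigh], axis=-1)
    assert manual == pytest.approx(np.array([[[2, 10]]]))
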
def test_meanpool_agg_apply_hidden_bias():
    # Specifying bias_initializer="ones" initialises all bias terms to ones;
    # passing bias=False turns off the outer bias but retains the hidden bias.
    agg = MeanPoolingAggregator(
        2, bias=False, act="linear", kernel_initializer="ones", bias_initializer="ones"
    )
    assert agg.get_config()["kernel_initializer"]["class_name"] == "Ones"
    assert agg.get_config()["bias_initializer"]["class_name"] == "Ones"

    # Self features
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features
    inp2 = keras.Input(shape=(1, 2, 2))

    out = agg([inp1, inp2])

    # Check sizes
    assert agg.weight_dims == [1, 1]

    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])

    # Agg output:
    # neigh_agg = mean(relu(x2 · ones(2x2) + ones(2)), axis=1)
    #   = mean([[5,5],[7,7]]) = [[6,6]]
    # from_self = K.dot(x1, ones) = [[2]]
    # from_neigh = K.dot(neigh_agg, ones(2x1)) = [[12]]

    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])
    expected = np.array([[[2, 12]]])

    assert expected == pytest.approx(actual)
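
# A minimal NumPy sketch of the hidden-bias arithmetic above, assuming the
# hidden bias of ones is added before the relu while the outer bias is
# dropped (bias=False); names are illustrative only.
def test_meanpool_agg_manual_arithmetic_hidden_bias():
    x1 = np.array([[[1.0, 1.0]]])
    x2 = np.array([[[[2.0, 2.0], [3.0, 3.0]]]])

    # Hidden pooling layer: kernel of ones (2x2), bias of ones, relu activation.
    hidden = np.maximum(x2 @ np.ones((2, 2)) + np.ones(2), 0.0)  # [[[[5, 5], [7, 7]]]]
    neigh_agg = hidden.mean(axis=2)                               # [[[6, 6]]]

    # No outer bias: only the self and neighbour kernels of ones remain.
    from_self = x1 @ np.ones((2, 1))                              # [[[2]]]
    from_neigh = neigh_agg @ np.ones((2, 1))                      # [[[12]]]

    manual = np.concatenate([from_self, from_neigh], axis=-1)
    assert manual == pytest.approx(np.array([[[2, 12]]]))
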
def test_meanpool_agg_constructor():
    agg = MeanPoolingAggregator(2, bias=False)
    assert agg.output_dim == 2
    assert agg.hidden_dim == 2
    assert not agg.has_bias
    assert agg.act.__name__ == "relu"
    assert agg.hidden_act.__name__ == "relu"

    # Check config
    config = agg.get_config()
    assert config["output_dim"] == 2
    assert config["bias"] is False
    assert config["act"] == "relu"
def test_meanpool_agg_zero_neighbours():
    agg = MeanPoolingAggregator(4, bias=False, act="linear", kernel_initializer="ones")

    inp1 = keras.Input(shape=(1, 2))
    inp2 = keras.Input(shape=(1, 0, 2))
    out = agg([inp1, inp2])

    # With zero neighbours there is nothing to pool, so the aggregator acts as
    # an MLP on the self features alone and the first weight group takes the
    # full output size.
    assert agg.weight_dims == [4, 0]

    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    x1 = np.array([[[1, 1]]])
    x2 = np.zeros((1, 1, 0, 2))

    actual = model.predict([x1, x2])
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(actual)
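
# A minimal sketch of the zero-neighbour case above, assuming the output then
# comes only from the self kernel of ones (2 -> 4), so every output entry is
# the sum of the self features; purely illustrative NumPy, not the library's
# internals.
def test_meanpool_agg_manual_zero_neighbours():
    x1 = np.array([[[1.0, 1.0]]])

    # Self kernel of ones maps the 2 self features onto all 4 output units.
    from_self = x1 @ np.ones((2, 4))
    assert from_self == pytest.approx(np.array([[[2, 2, 2, 2]]]))
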
def test_meanpool_agg_constructor_1():
    agg = MeanPoolingAggregator(output_dim=4, bias=True, act=lambda x: x + 1)
    assert agg.output_dim == 4
    assert agg.hidden_dim == 4
    assert agg.has_bias
    assert agg.act(2) == 3