# Example #1
0
def test_attn_agg_apply():
    """Numerical check of AttentionalAggregator on one head node with two neighbours."""
    agg = AttentionalAggregator(2,
                                bias=False,
                                act="linear",
                                kernel_initializer="ones")
    # Force a linear attention activation so the expected value below is easy to derive.
    agg.attn_act = keras.activations.get("linear")

    # Self features: shape (head nodes, features)
    inp1 = keras.Input(shape=(1, 2))
    # Neighbour features: shape (head nodes, neighbours, features)
    inp2 = keras.Input(shape=(1, 2, 2))
    out = agg([inp1, inp2])

    # The AttentionalAggregator implementation is a hack at the moment, it doesn't
    # assign any dimensions in the output to head-node features.
    assert agg.weight_dims == [0, 2]

    # Numerical test values
    x1 = np.array([[[1, 1]]])
    x2 = np.array([[[[2, 2], [3, 3]]]])

    # Agg output (act="linear", all-ones kernels; the attention softmax runs over
    # the self node followed by its neighbours, so h below stacks hs on top of
    # the transformed neighbour features):
    # hs = x1 · ones(2x2) = [2, 2]
    # h  = [hs; x2 · ones(2x2)] = [[2,2], [4,4], [6,6]]
    # attn_u = ones(2) · hs + ones(2) · h = [8, 12, 16]
    # attn = softmax(attn_u) ≈ [3.3e-4, 1.8e-2, 9.82e-1]
    # hout = attn · h ≈ [5.963, 5.963]
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    actual = model.predict([x1, x2])

    expected = np.array([[[5.963, 5.963]]])

    assert expected == pytest.approx(actual, rel=1e-4)
# Example #2
0
def test_attn_agg_constructor():
    """Constructor arguments are stored on the instance and round-trip via get_config."""
    aggregator = AttentionalAggregator(2, bias=False)

    # Instance attributes reflect the constructor arguments.
    assert aggregator.output_dim == 2
    assert not aggregator.has_bias
    assert aggregator.act.__name__ == "relu"
    # NOTE(review): the default attn_act activation is not asserted here — confirm intended default.

    # The serialized config carries the same settings.
    cfg = aggregator.get_config()
    assert cfg["output_dim"] == 2
    assert cfg["bias"] is False
    assert cfg["act"] == "relu"
# Example #3
0
def test_attn_agg_zero_neighbours():
    """With a zero-sized neighbour axis the output depends only on the self features."""
    aggregator = AttentionalAggregator(
        4, bias=False, act="linear", kernel_initializer="ones"
    )

    # Head-node features plus an empty neighbourhood (neighbour axis of size 0).
    self_input = keras.Input(shape=(1, 2))
    neigh_input = keras.Input(shape=(1, 0, 2))
    aggregated = aggregator([self_input, neigh_input])
    model = keras.Model(inputs=[self_input, neigh_input], outputs=aggregated)

    head_feats = np.array([[[1, 1]]])
    neigh_feats = np.zeros((1, 1, 0, 2))

    prediction = model.predict([head_feats, neigh_feats])
    # linear act + all-ones kernel: each output entry is the sum of the self features.
    expected = np.array([[[2, 2, 2, 2]]])
    assert expected == pytest.approx(prediction)
# Example #4
0
def test_attn_agg_constructor_1():
    """Keyword arguments, bias=True and a custom callable activation are accepted."""
    increment = lambda x: x + 1
    aggregator = AttentionalAggregator(output_dim=4, bias=True, act=increment)

    assert aggregator.output_dim == 4
    assert aggregator.has_bias
    # The callable activation is stored and invoked as-is.
    assert aggregator.act(2) == 3