Example #1
def test_APPNP_apply_sparse():

    G, features = create_graph_features()
    adj = nx.to_scipy_sparse_matrix(G)
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = adj.tocoo()
    A_indices = np.expand_dims(
        np.hstack((adj.row[:, None], adj.col[:, None])), 0
    )
    A_values = np.expand_dims(adj.data, 0)

    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_features=node_features)

    generator = FullBatchNodeGenerator(G, sparse=True, method="gcn")
    appnpModel = APPNP([2], ["relu"], generator=generator, dropout=0.5)

    x_in, x_out = appnpModel.node_model()
    model = keras.Model(inputs=x_in, outputs=x_out)

    # Check the predict method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict(
        [features[None, :, :], out_indices, A_indices, A_values])
    assert preds_1.shape == (1, 2, 2)

    # Check the predict_generator method
    preds_2 = model.predict_generator(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)

    assert preds_1 == pytest.approx(preds_2)
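
Note: these excerpts omit their imports and the shared create_graph_features() fixture. A minimal sketch of that setup is given below so the module-level examples can be run in isolation; the three node IDs ("a", "b", "c") and the (3, 2) prediction shapes pin down the graph size, but the edge list, the feature values, and the exact import paths (shown for a pre-1.0 StellarGraph release) are assumptions and may differ from the real test module.

import networkx as nx
import numpy as np
import pandas as pd
import pytest
import keras
from keras import initializers
from keras.layers import Dense

# Assumed import locations for an older (pre-1.0) StellarGraph; adjust to your version.
from stellargraph import StellarGraph
from stellargraph.mapper import FullBatchNodeGenerator
from stellargraph.layer import GCN, GAT, APPNP, PPNP, GraphAttention
from stellargraph.core.utils import GCN_Aadj_feats_op, PPNP_Aadj_feats_op


def create_graph_features():
    # Hypothetical stand-in for the shared fixture: a 3-node undirected graph
    # with one 2-dimensional feature vector per node; the tests zip these
    # features with G.nodes() to build the node-feature DataFrame.
    G = nx.Graph()
    G.add_nodes_from(["a", "b", "c"])
    G.add_edges_from([("a", "b"), ("b", "c"), ("a", "c")])
    features = np.array([[1.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
    return G, features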
Example #2
    def test_gat_serialize(self):
        G = example_graph_1(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
            normalize="l2",
        )

        x_in, x_out = gat.node_model()
        model = keras.Model(inputs=x_in, outputs=x_out)

        ng = gen.flow(G.nodes())

        # Save model
        model_json = model.to_json()

        # Set all weights to one
        model_weights = [np.ones_like(w) for w in model.get_weights()]

        # Load model from json & set all weights
        model2 = keras.models.model_from_json(
            model_json, custom_objects={"GraphAttention": GraphAttention})
        model2.set_weights(model_weights)

        # Test deserialized model
        actual = model2.predict_generator(ng)
        expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
            1.0 / G.number_of_nodes()
        )
        assert np.allclose(expected, actual[0])
Example #3
    def test_gat_node_model_no_norm(self):
        G = example_graph_1(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
            normalize=None,
        )

        gat._layers[1].kernel_initializer = keras.initializers.get("ones")
        gat._layers[1].attn_kernel_initializer = keras.initializers.get("ones")
        gat._layers[3].kernel_initializer = keras.initializers.get("ones")
        gat._layers[3].attn_kernel_initializer = keras.initializers.get("ones")

        x_in, x_out = gat.node_model()

        model = keras.Model(inputs=x_in, outputs=x_out)

        ng = gen.flow(G.nodes())
        actual = model.predict_generator(ng)

        expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
            self.F_in * self.layer_sizes[0] * self.attn_heads)
        assert np.allclose(expected, actual[0])
Example #4
def test_APPNP_apply_propagate_model_dense():
    G, features = create_graph_features()
    adj = nx.to_scipy_sparse_matrix(G)
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = adj.todense()[None, :, :]
    n_nodes = features.shape[0]

    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_features=node_features)

    generator = FullBatchNodeGenerator(G, sparse=False, method="gcn")
    appnpModel = APPNP([2], ["relu"], generator=generator, dropout=0.5)

    fully_connected_model = keras.Sequential()
    fully_connected_model.add(Dense(2))

    x_in, x_out = appnpModel.propagate_model(fully_connected_model)
    model = keras.Model(inputs=x_in, outputs=x_out)

    # Check the predict method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, adj])
    assert preds_1.shape == (1, 2, 2)

    # Check the predict_generator method
    preds_2 = model.predict_generator(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)

    assert preds_1 == pytest.approx(preds_2)
Example #5
def test_PPNP_edge_cases():
    G, features = create_graph_features()
    adj = nx.to_scipy_sparse_matrix(G)
    features, adj = PPNP_Aadj_feats_op(features, adj)

    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_features=node_features)

    # The "ppnp" method does not support a sparse adjacency matrix
    with pytest.raises(ValueError):
        generator = FullBatchNodeGenerator(G, sparse=True, method="ppnp")

    generator = FullBatchNodeGenerator(G, sparse=False, method="ppnp")

    try:
        ppnpModel = PPNP([2, 2], ["relu"], generator=generator, dropout=0.5)
    except ValueError as e:
        error = e
    assert str(error) == "The number of layers should equal the number of activations"

    try:
        ppnpModel = PPNP([2], ["relu"], generator=[0, 1], dropout=0.5)
    except TypeError as e:
        error = e
    assert str(error) == "Generator should be a instance of FullBatchNodeGenerator"
Example #6
def test_GCN_apply_sparse():
    G, features = create_graph_features()
    adj = nx.to_numpy_array(G)[None, :, :]
    n_nodes = features.shape[0]

    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_features=node_features)

    generator = FullBatchNodeGenerator(G, sparse=False, method="none")
    gcnModel = GCN([2], ["relu"], generator=generator, dropout=0.5)

    x_in, x_out = gcnModel.node_model()
    model = keras.Model(inputs=x_in, outputs=x_out)

    # Check the predict method
    out_indices = np.array([[0, 1]], dtype="int32")
    preds_1 = model.predict([features[None, :, :], out_indices, adj])
    assert preds_1.shape == (1, 2, 2)

    # Check the predict_generator method
    preds_2 = model.predict_generator(generator.flow(["a", "b"]))
    assert preds_2.shape == (1, 2, 2)

    assert preds_1 == pytest.approx(preds_2)
Example #7
    def test_gat_node_model_no_norm(self):
        G = example_graph_1(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
            normalize=None,
        )

        gat._layers[1].kernel_initializer = keras.initializers.get("ones")
        gat._layers[1].attn_kernel_initializer = keras.initializers.get("ones")
        gat._layers[3].kernel_initializer = keras.initializers.get("ones")
        gat._layers[3].attn_kernel_initializer = keras.initializers.get("ones")

        assert len(gat.node_model()) == 2
        x_in, x_out = gat.node_model()
        assert len(x_in) == 2
        assert int(x_in[0].shape[-1]) == self.F_in
        assert x_in[1]._keras_shape == (None, G.number_of_nodes())
        assert int(x_out.shape[-1]) == self.layer_sizes[-1]

        model = keras.Model(inputs=x_in, outputs=x_out)

        X = gen.features
        A = gen.Aadj
        actual = model.predict([X, A])
        expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
            self.F_in * self.layer_sizes[0] * self.attn_heads)
        assert expected == pytest.approx(actual)
Example #8
def test_GCN_regularisers():
    G, features = create_graph_features()
    adj = nx.to_numpy_array(G)[None, :, :]
    n_nodes = features.shape[0]

    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_features=node_features)

    generator = FullBatchNodeGenerator(G, sparse=False, method="none")

    gcn = GCN([2], generator)

    gcn = GCN([2], generator, kernel_initializer="ones")

    gcn = GCN([2], generator, kernel_initializer=initializers.ones())

    with pytest.raises(ValueError):
        gcn = GCN([2], generator, kernel_initializer="fred")

    gcn = GCN([2], generator, bias_initializer="zeros")

    gcn = GCN([2], generator, bias_initializer=initializers.zeros())

    with pytest.raises(ValueError):
        gcn = GCN([2], generator, bias_initializer="barney")
Example #9
def test_GCN_activations():
    G, features = create_graph_features()
    adj = nx.to_numpy_array(G)[None, :, :]
    n_nodes = features.shape[0]

    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_features=node_features)

    generator = FullBatchNodeGenerator(G, sparse=False, method="none")

    gcn = GCN([2], generator)
    assert gcn.activations == ["relu"]

    gcn = GCN([2, 2], generator)
    assert gcn.activations == ["relu", "relu"]

    gcn = GCN([2], generator, activations=["linear"])
    assert gcn.activations == ["linear"]

    with pytest.raises(ValueError):
        # More activations than layers
        gcn = GCN([2], generator, activations=["relu", "linear"])

    with pytest.raises(ValueError):
        # Fewer activations than layers
        gcn = GCN([2, 2], generator, activations=["relu"])

    with pytest.raises(ValueError):
        # Unknown activation
        gcn = GCN([2], generator, activations=["bleach"])
Example #10
    def test_gat_serialize(self):
        G = example_graph_1(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
            normalize="l2",
        )

        x_in, x_out = gat.node_model()
        model = keras.Model(inputs=x_in, outputs=x_out)

        # Save model
        model_json = model.to_json()

        # Set all weights to one
        model_weights = [np.ones_like(w) for w in model.get_weights()]

        # Load model from json & set all weights
        model2 = keras.models.model_from_json(
            model_json, custom_objects={"GraphAttention": GraphAttention})
        model2.set_weights(model_weights)

        # Test loaded model
        X = gen.features
        A = gen.Aadj
        actual = model2.predict([X, A])
        expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
            1.0 / G.number_of_nodes()
        )
        assert expected == pytest.approx(actual)
Example #11
    def test_gat_node_model_wrong_norm(self):
        G = example_graph_1(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G)
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                generator=gen,
                bias=True,
                normalize="whatever",
            )
Example #12
def test_GCN_init():
    G, features = create_graph_features()
    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_type_name="node", node_features=node_features)

    generator = FullBatchNodeGenerator(G)
    gcnModel = GCN([2], ["relu"], generator=generator, dropout=0.5)

    assert gcnModel.layer_sizes == [2]
    assert gcnModel.activations == ["relu"]
    assert gcnModel.dropout == 0.5
Example #13
    def test_gat_sparse_node_model_constructor(self):
        G = example_graph_1(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
        )

        assert len(gat.node_model()) == 2
        x_in, x_out = gat.node_model()
        assert len(x_in) == (4 if self.sparse else 3)
        assert int(x_in[0].shape[-1]) == self.F_in
        assert int(x_out.shape[-1]) == self.layer_sizes[-1]
Example #14
    def test_gat_node_model_constructor(self):
        G = example_graph_1(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G)
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
        )

        assert len(gat.node_model()) == 2
        x_in, x_out = gat.node_model()
        assert len(x_in) == 2
        assert int(x_in[0].shape[-1]) == self.F_in
        assert x_in[1]._keras_shape == (None, G.number_of_nodes())
        assert int(x_out.shape[-1]) == self.layer_sizes[-1]
Example #15
def test_GCN_apply():
    G, features = create_graph_features()
    adj = nx.adjacency_matrix(G)

    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_type_name="node", node_features=node_features)

    generator = FullBatchNodeGenerator(G)
    gcnModel = GCN([2], ["relu"], generator=generator, dropout=0.5)

    x_in, x_out = gcnModel.node_model()
    model = keras.Model(inputs=x_in, outputs=x_out)
    preds = model.predict([features, adj], batch_size=adj.shape[0])

    assert preds.shape == (3, 2)
Example #16
    def test_constructor(self):
        G = example_graph_1(feature_size=self.F_in)
        gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
        # test error if no activations are passed:
        with pytest.raises(TypeError):
            gat = GAT(layer_sizes=self.layer_sizes, generator=gen, bias=True)

        # test error where layer_sizes is not a list:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=10,
                activations=self.activations,
                attn_heads=self.attn_heads,
                generator=gen,
                bias=True,
            )

        # test error where layer_sizes values are not valid
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=[4, 0],
                activations=self.activations,
                attn_heads=self.attn_heads,
                generator=gen,
                bias=True,
            )

        # test for incorrect length of attn_heads list:
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=[8, 8, 1],
                generator=gen,
                bias=True,
            )

        # test for invalid values in attn_heads list:
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=[8, 0],
                generator=gen,
                bias=True,
            )

        # test for invalid type of attn_heads argument:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=8.0,
                generator=gen,
                bias=True,
            )

        # test error where activations is not a list:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations="relu",
                generator=gen,
                bias=True,
            )

        # test attn_heads_reduction errors:
        with pytest.raises(TypeError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                attn_heads_reduction="concat",
                generator=gen,
                bias=True,
            )
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                attn_heads_reduction=["concat", "concat", "average"],
                generator=gen,
                bias=True,
            )
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=self.activations,
                attn_heads=self.attn_heads,
                attn_heads_reduction=["concat", "sum"],
                generator=gen,
                bias=True,
            )

        # test error where len(activations) is not equal to len(layer_sizes):
        with pytest.raises(ValueError):
            gat = GAT(
                layer_sizes=self.layer_sizes,
                activations=["relu"],
                generator=gen,
                bias=True,
            )

        # Default attention heads reductions:
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            generator=gen,
            bias=True,
        )

        assert gat.activations == self.activations
        assert gat.attn_heads_reduction == ["concat", "average"]
        assert gat.generator == gen

        # User-specified attention heads reductions:
        gat = GAT(
            layer_sizes=self.layer_sizes,
            activations=self.activations,
            attn_heads=self.attn_heads,
            attn_heads_reduction=["concat", "concat"],
            generator=gen,
            bias=True,
        )

        assert gat.attn_heads_reduction == ["concat", "concat"]
Example #17
def test_APPNP_edge_cases():
    G, features = create_graph_features()
    adj = nx.to_scipy_sparse_matrix(G)
    features, adj = GCN_Aadj_feats_op(features, adj)
    adj = adj.todense()[None, :, :]
    n_nodes = features.shape[0]

    nodes = G.nodes()
    node_features = pd.DataFrame.from_dict(
        {n: f for n, f in zip(nodes, features)}, orient="index"
    )
    G = StellarGraph(G, node_features=node_features)

    generator = FullBatchNodeGenerator(G, sparse=False, method="gcn")

    try:
        appnpModel = APPNP([2, 2], ["relu"], generator=generator, dropout=0.5)
    except ValueError as e:
        error = e
    assert str(error) == "The number of layers should equal the number of activations"

    try:
        appnpModel = APPNP([2], ["relu"], generator=[0, 1], dropout=0.5)
    except TypeError as e:
        error = e
    assert str(error) == "Generator should be a instance of FullBatchNodeGenerator"

    try:
        appnpModel = APPNP([2], ["relu"],
                           generator=generator,
                           dropout=0.0,
                           approx_iter=-1)
    except ValueError as e:
        error = e
    assert str(error) == "approx_iter should be a positive integer"

    try:
        appnpModel = APPNP([2], ["relu"],
                           generator=generator,
                           dropout=0.0,
                           approx_iter=1.2)
    except ValueError as e:
        error = e
    assert str(error) == "approx_iter should be a positive integer"

    try:
        appnpModel = APPNP([2], ["relu"],
                           generator=generator,
                           dropout=0.0,
                           teleport_probability=1.2)
    except ValueError as e:
        error = e
    assert str(error) == "teleport_probability should be between 0 and 1 (inclusive)"
