Code Example #1
def sgc(x, edge_index, edge_weight, K, kernel, bias=None, renorm=True, improved=False, cache=None):
    """
    Functional API for Simple Graph Convolution (SGC).

    :param x: Tensor, shape: [num_nodes, num_features], node features
    :param edge_index: Tensor, shape: [2, num_edges], edge information
    :param edge_weight: Tensor or None, shape: [num_edges]
    :param K: Number of hops. (default: :obj:`1`)
    :param kernel: Tensor, shape: [num_features, num_output_features], weight.
    :param bias: Tensor, shape: [num_output_features], bias.
    :param renorm: Whether to use the renormalization trick (https://arxiv.org/pdf/1609.02907.pdf).
    :param improved: Whether to use the improved GCN normalization.
    :param cache: A dict for caching A' for GCN. Different graphs should not share the same cache dict.
    :return: Updated node features (x), shape: [num_nodes, num_output_features]
    """
    updated_edge_index, normed_edge_weight = gcn_norm_edge(edge_index, x.shape[0], edge_weight,
                                                           renorm, improved, cache)

    h = x
    for _ in range(K):
        h = aggregate_neighbors(
            h,
            updated_edge_index,
            normed_edge_weight,
            gcn_mapper,
            sum_reducer,
            identity_updater
        )

    h = h @ kernel

    if bias is not None:
        h += bias
    return h
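
For reference, a minimal usage sketch with a hypothetical toy ring graph. It assumes the sgc function above and the tf_geometric helpers it calls (gcn_norm_edge, aggregate_neighbors, gcn_mapper, sum_reducer, identity_updater) are importable in the current scope; the tensors below are illustrative variables created by the caller, not part of the library.

import tensorflow as tf

# Hypothetical toy ring graph: 5 nodes, 8 input features, 3 output features.
num_nodes, num_features, num_out = 5, 8, 3
x = tf.random.normal([num_nodes, num_features])                              # [num_nodes, num_features]
edge_index = tf.constant([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=tf.int32)  # [2, num_edges]

kernel = tf.Variable(tf.random.truncated_normal([num_features, num_out]))
bias = tf.Variable(tf.zeros([num_out]))

# Two hops of normalized propagation, then a single linear transform.
h = sgc(x, edge_index, edge_weight=None, K=2, kernel=kernel, bias=bias)
print(h.shape)  # (5, 3)

Because the K propagation steps are parameter-free, the only learnable transform in SGC is the single kernel/bias applied after the loop.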
Code Example #2
def gcn_graph_sage(x,
                   edge_index,
                   edge_weight,
                   kernel,
                   bias=None,
                   activation=None,
                   normalize=False,
                   cache=None):
    """

        :param x: Tensor, shape: [num_nodes, num_features], node features
        :param edge_index: Tensor, shape: [2, num_edges], edge information
        :param edge_weight: Tensor or None, shape: [num_edges]
        :param kernel: Tensor, shape: [num_features, num_output_features], weight
        :param bias: Tensor, shape: [num_output_features], bias
        :param activation: Activation function to use.
        :param normalize: If set to :obj:`True`, output features
                will be :math:`\ell_2`-normalized, *i.e.*,
                :math:`\frac{\mathbf{x}^{\prime}_i}
                {\| \mathbf{x}^{\prime}_i \|_2}`.
                (default: :obj:`False`)
        :param cache: A dict for caching A' for GCN. Different graphs should not share the same cache dict.
        :return: Updated node features (x), shape: [num_nodes, num_output_features]
    """
    if edge_weight is None:
        edge_weight = tf.ones([edge_index.shape[1]], dtype=tf.float32)

    updated_edge_index, normed_edge_weight = gcn_norm_edge(
        edge_index, x.shape[0], edge_weight, cache=cache)
    row, col = updated_edge_index
    repeated_x = tf.gather(x, row)
    neighbor_x = tf.gather(x, col)

    neighbor_x = gcn_mapper(repeated_x,
                            neighbor_x,
                            edge_weight=normed_edge_weight)

    reduced_msg = sum_reducer(neighbor_x, row, num_nodes=x.shape[0])

    h = reduced_msg @ kernel
    if bias is not None:
        h += bias

    if activation is not None:
        h = activation(h)

    if normalize:
        h = tf.nn.l2_normalize(h, axis=-1)

    return h
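
A minimal usage sketch with hypothetical toy tensors, assuming gcn_graph_sage and the helpers it calls are in scope. With normalize=True the output rows are l2-normalized as described in the docstring.

import tensorflow as tf

# Hypothetical toy ring graph: 4 nodes, 16 input features, 8 output features.
num_nodes, num_features, num_out = 4, 16, 8
x = tf.random.normal([num_nodes, num_features])
edge_index = tf.constant([[0, 1, 2, 3], [1, 2, 3, 0]], dtype=tf.int32)

kernel = tf.Variable(tf.random.truncated_normal([num_features, num_out]))
bias = tf.Variable(tf.zeros([num_out]))

h = gcn_graph_sage(x, edge_index, edge_weight=None, kernel=kernel, bias=bias,
                   activation=tf.nn.relu, normalize=True)
print(h.shape)  # (4, 8)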
Code Example #3
File: tagcn.py  Project: rahul5757/tf_geometric
def tagcn(x,
          edge_index,
          edge_weight,
          K,
          kernel,
          bias=None,
          activation=None,
          renorm=False,
          improved=False,
          cache=None):
    """
    Functional API for Topology Adaptive Graph Convolutional Network (TAGCN).

    :param x: Tensor, shape: [num_nodes, num_features], node features.
    :param edge_index: Tensor, shape: [2, num_edges], edge information.
    :param edge_weight: Tensor or None, shape: [num_edges].
    :param K: Number of hops. (default: :obj:`3`)
    :param kernel: Tensor, shape: [num_features * (K + 1), num_output_features], weight.
    :param bias: Tensor, shape: [num_output_features], bias.
    :param activation: Activation function to use.
    :param renorm: Whether to use the renormalization trick (https://arxiv.org/pdf/1609.02907.pdf).
    :param improved: Whether to use the improved GCN normalization.
    :param cache: A dict for caching A' for GCN. Different graphs should not share the same cache dict.
    :return: Updated node features (x), shape: [num_nodes, num_output_features]
    """

    xs = [x]
    updated_edge_index, normed_edge_weight = gcn_norm_edge(
        edge_index, x.shape[0], edge_weight, renorm, improved, cache)
    for k in range(K):
        h = aggregate_neighbors(xs[-1], updated_edge_index, normed_edge_weight,
                                gcn_mapper, sum_reducer, identity_updater)

        xs.append(h)

    h = tf.concat(xs, axis=-1)

    out = h @ kernel
    if bias is not None:
        out += bias

    if activation is not None:
        out = activation(out)

    return out
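
A minimal usage sketch with a hypothetical toy ring graph, assuming the tagcn function above and its helpers are in scope. Note that the K hop outputs are concatenated with the input features before the linear transform, so the kernel's input dimension is num_features * (K + 1).

import tensorflow as tf

# Hypothetical toy ring graph: 6 nodes, 8 input features, 4 output features, K = 3 hops.
num_nodes, num_features, num_out, K = 6, 8, 4, 3
x = tf.random.normal([num_nodes, num_features])
edge_index = tf.constant([[0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0]], dtype=tf.int32)

# xs holds the input plus K propagated tensors, concatenated along the feature axis,
# hence the kernel input dimension num_features * (K + 1).
kernel = tf.Variable(tf.random.truncated_normal([num_features * (K + 1), num_out]))
bias = tf.Variable(tf.zeros([num_out]))

h = tagcn(x, edge_index, edge_weight=None, K=K, kernel=kernel, bias=bias,
          activation=tf.nn.relu)
print(h.shape)  # (6, 4)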
Code Example #4
def appnp(x,
          edge_index,
          edge_weight,
          kernels,
          biases,
          dense_activation=tf.nn.relu,
          activation=None,
          num_iterations=2,
          alpha=0.15,
          dense_drop_rate=0.0,
          edge_drop_rate=0.0,
          cache=None,
          training=False):
    """
    Functional API for Approximate Personalized Propagation of Neural Predictions (APPNP).

    :param x: Tensor, shape: [num_nodes, num_features], node features
    :param edge_index: Tensor, shape: [2, num_edges], edge information
    :param edge_weight: Tensor or None, shape: [num_edges]
    :param kernels: List[Tensor], shape of each Tensor: [num_features, num_output_features], weights
    :param biases: List[Tensor], shape of each Tensor: [num_output_features], biases
    :param dense_activation: Activation function to use for the dense layers,
        except for the last dense layer, which will not be activated.
    :param activation: Activation function to use for the output.
    :param num_iterations: Number of propagation power iterations.
    :param alpha: Teleport probability.
    :param dense_drop_rate: Dropout rate for the input of every dense layer.
    :param edge_drop_rate: Dropout rate for the edges/adj used for propagation.
    :param cache: A dict for caching A' for GCN. Different graphs should not share the same cache dict.
        To use @tf_utils.function with gcn, you should cache the normed edge information before the first call to gcn.

        - (1) If you're using OOP APIs tfg.layers.GCN:

              gcn_layer.cache_normed_edge(graph)

        - (2) If you're using functional API tfg.nn.gcn:

              from tf_geometric.nn.conv.gcn import gcn_cache_normed_edge
              gcn_cache_normed_edge(graph)

    :param training: Python boolean indicating whether the layer should behave in
        training mode (adding dropout) or in inference mode (doing nothing).
    :return: Updated node features (x), shape: [num_nodes, num_output_features]
    """

    num_nodes = tf.shape(x)[0]
    updated_edge_index, normed_edge_weight = gcn_norm_edge(edge_index,
                                                           num_nodes,
                                                           edge_weight,
                                                           cache=cache)

    num_dense_layers = len(kernels)

    h = x
    for i, (kernel, bias) in enumerate(zip(kernels, biases)):
        if training and dense_drop_rate > 0.0:
            h = tf.compat.v2.nn.dropout(h, dense_drop_rate)
        h = h @ kernel + bias
        if dense_activation is not None and i < num_dense_layers - 1:
            h = dense_activation(h)

    if training and edge_drop_rate > 0.0:
        normed_edge_weight = tf.compat.v2.nn.dropout(normed_edge_weight,
                                                     edge_drop_rate)

    prop_h = h

    for i in range(num_iterations):
        prop_h = aggregate_neighbors(prop_h,
                                     updated_edge_index,
                                     normed_edge_weight,
                                     gcn_mapper,
                                     sum_reducer,
                                     identity_updater,
                                     num_nodes=num_nodes)
        prop_h = prop_h * (1.0 - alpha) + h * alpha

    if activation is not None:
        prop_h = activation(prop_h)

    return prop_h
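
A minimal usage sketch with a hypothetical toy ring graph, assuming the appnp function above and its helpers are in scope. The kernels/biases lists define the prediction MLP (here two dense layers), and propagation then mixes the MLP output with its neighborhood averages for num_iterations steps with teleport probability alpha; the hyperparameter values below are illustrative only.

import tensorflow as tf

# Hypothetical toy ring graph: 6 nodes, 32 input features, 16 hidden units, 7 classes.
num_nodes, num_features, hidden, num_out = 6, 32, 16, 7
x = tf.random.normal([num_nodes, num_features])
edge_index = tf.constant([[0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0]], dtype=tf.int32)

# Two dense layers: num_features -> hidden -> num_out; the last one is not activated.
kernels = [
    tf.Variable(tf.random.truncated_normal([num_features, hidden])),
    tf.Variable(tf.random.truncated_normal([hidden, num_out])),
]
biases = [tf.Variable(tf.zeros([hidden])), tf.Variable(tf.zeros([num_out]))]

logits = appnp(x, edge_index, edge_weight=None, kernels=kernels, biases=biases,
               num_iterations=10, alpha=0.1,
               dense_drop_rate=0.5, edge_drop_rate=0.5, training=True)
print(logits.shape)  # (6, 7)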