Example #1
    def __init__(self, in_features,
                 out_features, hids=[16], num_heads=[8],
                 acts=['elu'], dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01, bias=True):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=floatx(),
                    sparse=True, name='adj_matrix')

        h = x
        for hid, num_head, act in zip(hids, num_heads, acts):
            h = GraphAttention(hid, attn_heads=num_head,
                               reduction='concat',
                               use_bias=bias,
                               activation=act,
                               kernel_regularizer=regularizers.l2(weight_decay),
                               attn_kernel_regularizer=regularizers.l2(
                                   weight_decay),
                               )([h, adj])
            h = Dropout(rate=dropout)(h)

        h = GraphAttention(out_features, use_bias=bias,
                           attn_heads=1, reduction='average')([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #2
    def __init__(self, in_features, out_features,
                 hids=[64], acts=['relu'],
                 dropout=0.5, weight_decay=5e-3,
                 lr=0.01, bias=False, K=10):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=floatx(),
                    sparse=True, name='adj_matrix')

        h = x
        for hid, act in zip(hids, acts):
            h = Dense(hid, use_bias=bias, activation=act,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)
            h = Dropout(dropout)(h)

        h = Dense(out_features, use_bias=bias, activation=acts[-1],
                  kernel_regularizer=regularizers.l2(weight_decay))(h)
        h = Dropout(dropout)(h)

        h = PropConvolution(K, use_bias=bias, activation='sigmoid',
                            kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #3
    def __init__(self, in_features, out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, bias=False,
                 experimental_run_tf_function=True):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=floatx(),
                    sparse=True, name='adj_matrix')

        h = x
        for hid, act in zip(hids, acts):
            h = GraphConvolution(hid, use_bias=bias,
                                 activation=act,
                                 kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

            h = Dropout(rate=dropout)(h)

        h = GraphConvolution(out_features, use_bias=bias)([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'],
                     experimental_run_tf_function=experimental_run_tf_function)
Example #4
    def __init__(self, in_channels, out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, order=2, use_bias=False):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(), name='node_attr')
        adj = [Input(batch_shape=[None, None],
                     dtype=floatx(), sparse=True,
                     name=f'adj_matrix_{i}') for i in range(order + 1)]
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = ChebyConvolution(hidden, order=order, use_bias=use_bias,
                                 activation=activation,
                                 kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
            h = Dropout(rate=dropout)(h)

        h = ChebyConvolution(out_channels,
                             order=order, use_bias=use_bias)([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, *adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[32],
                 activations=['relu'],
                 dropout=0.5,
                 l2_norm=5e-4,
                 lr=0.01,
                 use_bias=False):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='attr_matrix')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = Dense(hidden,
                      use_bias=use_bias,
                      activation=activation,
                      kernel_regularizer=regularizers.l2(l2_norm))(h)
            h = Dropout(rate=dropout)(h)

        h = GraphConvolution(out_channels, use_bias=use_bias)([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[32],
                 n_filters=[8, 8],
                 activations=[None, None],
                 dropout=0.8,
                 l2_norm=5e-4,
                 lr=0.1,
                 use_bias=False,
                 K=8):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='attr_matrix')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=False,
                    name='adj_matrix')
        mask = Input(batch_shape=[None], dtype='bool', name='node_mask')

        h = x
        for idx, hidden in enumerate(hiddens):
            h = Dropout(rate=dropout)(h)
            h = DenseConvolution(
                hidden,
                use_bias=use_bias,
                activation=activations[idx],
                kernel_regularizer=regularizers.l2(l2_norm))([h, adj])

        for idx, n_filter in enumerate(n_filters):
            top_k_h = Top_k_features(K=K)([h, adj])
            cur_h = LGConvolution(
                n_filter,
                kernel_size=K,
                use_bias=use_bias,
                dropout=dropout,
                activation=activations[idx],
                kernel_regularizer=regularizers.l2(l2_norm))(top_k_h)
            cur_h = BatchNormalization()(cur_h)
            h = Concatenate()([h, cur_h])

        h = Dropout(rate=dropout)(h)
        h = DenseConvolution(
            out_channels,
            use_bias=use_bias,
            activation=activations[-1],
            kernel_regularizer=regularizers.l2(l2_norm))([h, adj])

        h = Mask()([h, mask])

        super().__init__(inputs=[x, adj, mask], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Nadam(lr=lr),
                     metrics=['accuracy'])
Example #7
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[32],
                 num_filters=[8, 8],
                 acts=[None, None],
                 dropout=0.8,
                 weight_decay=5e-4,
                 lr=0.1,
                 bias=False,
                 K=8):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(),
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=False,
                    name='adj_matrix')

        h = x
        for idx, hid in enumerate(hids):
            h = Dropout(rate=dropout)(h)
            h = DenseConv(
                hid,
                use_bias=bias,
                activation=acts[idx],
                kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

        for idx, num_filter in enumerate(num_filters):
            top_k_h = Top_k_features(K=K)([h, adj])
            cur_h = LGConv(
                num_filter,
                kernel_size=K,
                use_bias=bias,
                dropout=dropout,
                activation=acts[idx],
                kernel_regularizer=regularizers.l2(weight_decay))(top_k_h)
            cur_h = BatchNormalization()(cur_h)
            h = Concatenate()([h, cur_h])

        h = Dropout(rate=dropout)(h)
        h = DenseConv(
            out_features,
            use_bias=bias,
            activation=acts[-1],
            kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Nadam(lr=lr),
                     metrics=['accuracy'])
Example #8
def infer_type(x):
    """Infer type of the input `x`.

    Parameters:
    ----------
    x: Any python object

    Returns:
    ----------
    dtype: string, the converted type of `x`:
        1. `graphgallery.floatx()` if `x` is floating
        2. `graphgallery.intx()` if `x` is integer
        3. `'bool'` if `x` is bool.

    """
    # For tensor or variable
    if is_th_tensor(x):
        if x.dtype.is_floating_point:
            return floatx()
        elif x.dtype == torch.bool:
            return 'bool'
        elif 'int' in str(x.dtype):
            return intx()
        else:
            raise RuntimeError(f'Invalid input of `{type(x)}`')

    elif is_tf_tensor(x):
        if x.dtype.is_floating:
            return floatx()
        elif x.dtype.is_integer or x.dtype.is_unsigned:
            return intx()
        elif x.dtype.is_bool:
            return 'bool'
        else:
            raise RuntimeError(f'Invalid input of `{type(x)}`')

    if not hasattr(x, 'dtype'):
        x = np.asarray(x)

    if x.dtype.kind in {'f', 'c'}:
        return floatx()
    elif x.dtype.kind in {'i', 'u'}:
        return intx()
    elif x.dtype.kind == 'b':
        return 'bool'
    elif x.dtype.kind == 'O':
        raise RuntimeError(f'Invalid inputs of `{x}`.')
    else:
        raise RuntimeError(f'Invalid input of `{type(x)}`')
Example #9
def infer_type(x: Any) -> str:
    """Infer the type of the input 'x'.

    Parameters:
    ----------
    x: Any python object

    Returns:
    ----------
    dtype: string, the proper data type of 'x':
        1. 'graphgallery.floatx()' if 'x' is floating,
        2. 'graphgallery.intx()' if 'x' is integer,
        3. 'graphgallery.boolx()' if 'x' is boolean.
    """
    # For tensor or variable
    if pytorch.is_tensor(x):
        if x.dtype.is_floating_point:
            return gg.floatx()
        elif x.dtype == torch.bool:
            return gg.boolx()
        elif 'int' in str(x.dtype):
            return gg.intx()
        else:
            raise TypeError(f"Invalid type of pytorch Tensor: '{type(x)}'")

    elif tensorflow.is_tensor(x):
        if x.dtype.is_floating:
            return gg.floatx()
        elif x.dtype.is_integer or x.dtype.is_unsigned:
            return gg.intx()
        elif x.dtype.is_bool:
            return gg.boolx()
        else:
            raise TypeError(f"Invalid type of tensorflow Tensor: '{type(x)}'")

    _x = x
    if not hasattr(_x, 'dtype'):
        _x = np.asarray(_x)

    if _x.dtype.kind in {'f', 'c'}:
        return gg.floatx()
    elif _x.dtype.kind in {'i', 'u'}:
        return gg.intx()
    elif _x.dtype.kind == 'b':
        return gg.boolx()
    elif _x.dtype.kind == 'O':
        raise TypeError(f"Invalid inputs of '{x}'.")
    else:
        raise TypeError(f"Invalid input of '{type(x).__name__}'.")
Example #10
    def __init__(self, in_features, out_features,
                 hids=[16], acts=['relu'], dropout=0.5,
                 weight_decay=5e-4, lr=0.01, bias=False):

        _intx = intx()
        _floatx = floatx()
        x = Input(batch_shape=[None, in_features],
                  dtype=_floatx, name='node_attr')
        edge_index = Input(batch_shape=[None, 2], dtype=_intx,
                           name='edge_index')
        edge_weight = Input(batch_shape=[None], dtype=_floatx,
                            name='edge_weight')

        h = x
        for hid, act in zip(hids, acts):
            h = GraphEdgeConvolution(hid, use_bias=bias,
                                     activation=act,
                                     kernel_regularizer=regularizers.l2(weight_decay))([h, edge_index, edge_weight])

            h = Dropout(rate=dropout)(h)

        h = GraphEdgeConvolution(out_features, use_bias=bias)(
            [h, edge_index, edge_weight])

        super().__init__(inputs=[x, edge_index, edge_weight], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #11
def add_selfloops_edge(edge_index, edge_weight, n_nodes=None, fill_weight=1.0):
    """Append a self-loop edge (i, i) with weight `fill_weight` for every node."""

    if n_nodes is None:
        n_nodes = tf.reduce_max(edge_index) + 1

    if edge_weight is None:
        edge_weight = tf.ones([edge_index.shape[0]], dtype=floatx())

    range_arr = tf.range(n_nodes, dtype=edge_index.dtype)
    diagonal_edge_index = tf.stack([range_arr, range_arr], axis=1)
    updated_edge_index = tf.concat([edge_index, diagonal_edge_index], axis=0)

    diagonal_edge_weight = tf.zeros([n_nodes], dtype=floatx()) + fill_weight
    updated_edge_weight = tf.concat([edge_weight, diagonal_edge_weight], axis=0)

    return updated_edge_index, updated_edge_weight
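A minimal sketch of how this is called (assuming TensorFlow is imported as `tf`); self-loops are appended row-wise as `[i, i]` pairs:

edge_index = tf.constant([[0, 1], [1, 2]])  # shape [M, 2], a 3-node path
edge_index, edge_weight = add_selfloops_edge(edge_index, None, n_nodes=3)
# edge_index is now [[0, 1], [1, 2], [0, 0], [1, 1], [2, 2]]
# edge_weight is [1., 1., 1., 1., 1.]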
Example #12
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False,
                 experimental_run_tf_function=True):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='node_attr')

        h = x
        for hid, act in zip(hids, acts):
            h = Dense(hid,
                      use_bias=use_bias,
                      activation=act,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)

            h = Dropout(rate=dropout)(h)

        h = Dense(out_channels, use_bias=use_bias)(h)

        super().__init__(inputs=x, outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'],
                     experimental_run_tf_function=experimental_run_tf_function)
Example #13
def normalize_edge_tensor(edge_index, edge_weight=None, n_nodes=None, fill_weight=1.0, rate=-0.5):
    """Add self-loops, then normalize edge weights as D^rate * A * D^rate."""

    if edge_weight is None:
        edge_weight = tf.ones([edge_index.shape[0]], dtype=floatx())

    if n_nodes is None:
        n_nodes = tf.reduce_max(edge_index) + 1

    edge_index, edge_weight = add_selfloops_edge(
        edge_index, edge_weight, n_nodes=n_nodes, fill_weight=fill_weight)

    row, col = tf.unstack(edge_index, axis=1)
    deg = tf.math.unsorted_segment_sum(edge_weight, row, num_segments=n_nodes)
    deg_inv_sqrt = tf.pow(deg, rate)

    # guard against inf/NaN produced by zero-degree nodes
    deg_inv_sqrt = tf.where(
        tf.math.logical_or(tf.math.is_inf(deg_inv_sqrt),
                           tf.math.is_nan(deg_inv_sqrt)),
        tf.zeros_like(deg_inv_sqrt),
        deg_inv_sqrt
    )

    edge_weight_norm = tf.gather(
        deg_inv_sqrt, row) * edge_weight * tf.gather(deg_inv_sqrt, col)

    return edge_index, edge_weight_norm
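With the default `rate=-0.5` this is the symmetric GCN normalization D^{-1/2}(A + I)D^{-1/2}, applied edge-wise. A minimal sketch:

edge_index = tf.constant([[0, 1], [1, 0]])  # one undirected edge, shape [M, 2]
edge_index, edge_weight = normalize_edge_tensor(edge_index)
# After adding self-loops both nodes have degree 2, so every edge
# (including the loops) gets weight (1/sqrt(2)) * 1 * (1/sqrt(2)) = 0.5.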
Example #14
def sparse_edges_to_sparse_tensor(edge_index: np.ndarray,
                                  edge_weight: np.ndarray = None,
                                  shape: tuple = None) -> torch.sparse.Tensor:
    """
    edge_index: shape [2, M]
    edge_weight: shape [M,]
    """
    edge_index = T.edge_transpose(edge_index)
    edge_index = torch.LongTensor(edge_index)

    if edge_weight is None:
        edge_weight = torch.ones(edge_index.shape[1],
                                 dtype=getattr(torch, floatx()))
    else:
        edge_weight = torch.tensor(edge_weight)

    if shape is None:
        N = edge_index.max() + 1
        shape = (N, N)

    shape = torch.Size(shape)
    dtype = str(edge_weight.dtype)
    return getattr(torch.sparse,
                   dtype_to_tensor_class(dtype))(edge_index, edge_weight,
                                                 shape)
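A hypothetical usage sketch; it assumes the `T.edge_transpose` and `dtype_to_tensor_class` helpers above are importable and behave as their names suggest:

edge_index = np.array([[0, 1], [1, 0]])          # shape [2, M]: one undirected edge
adj = sparse_edges_to_sparse_tensor(edge_index)  # 2 x 2 sparse FloatTensor
adj.to_dense()                                   # tensor([[0., 1.], [1., 0.]])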
Example #15
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[32],
                 activations=['relu'],
                 dropout=0.5,
                 l2_norm=5e-4,
                 lr=0.01,
                 use_bias=True,
                 aggregator='mean',
                 output_normalize=False,
                 n_samples=[15, 5]):

        Agg = _AGG.get(aggregator, None)
        if not Agg:
            raise ValueError(
                f"Invalid value of 'aggregator', allowed values {tuple(_AGG.keys())}, but got '{aggregator}'."
            )

        _intx = intx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='attr_matrix')
        nodes = Input(batch_shape=[None], dtype=_intx, name='nodes')
        neighbors = [
            Input(batch_shape=[None], dtype=_intx, name=f'neighbors_{hop}')
            for hop, n_sample in enumerate(n_samples)
        ]

        aggregators = []
        for hidden, activation in zip(hiddens, activations):
            # you can use `GCNAggregator` instead
            aggregators.append(
                Agg(hidden,
                    concat=True,
                    activation=activation,
                    use_bias=use_bias,
                    kernel_regularizer=regularizers.l2(l2_norm)))

        aggregators.append(Agg(out_channels, use_bias=use_bias))

        h = [tf.nn.embedding_lookup(x, node) for node in [nodes, *neighbors]]
        for agg_i, aggregator in enumerate(aggregators):
            attribute_shape = h[0].shape[-1]
            for hop in range(len(n_samples) - agg_i):
                neighbor_shape = [-1, n_samples[hop], attribute_shape]
                h[hop] = aggregator(
                    [h[hop], tf.reshape(h[hop + 1], neighbor_shape)])
                if hop != len(n_samples) - 1:
                    h[hop] = Dropout(rate=dropout)(h[hop])
            h.pop()

        h = h[0]
        if output_normalize:
            h = tf.nn.l2_normalize(h, axis=1)

        super().__init__(inputs=[x, nodes, *neighbors], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #16
    def call(self, inputs):
        edge_index, edge_weight = inputs
        num_nodes = tf.reduce_max(edge_index) + 1
        if edge_weight is None:
            edge_weight = tf.ones([edge_index.shape[0]], dtype=floatx())

        edge_index, edge_weight = self.add_selfloops_edge(
            edge_index,
            num_nodes,
            edge_weight=edge_weight,
            fill_weight=self.fill_weight)

        row = tf.gather(edge_index, 0, axis=1)
        col = tf.gather(edge_index, 1, axis=1)
        deg = tf.math.unsorted_segment_sum(edge_weight,
                                           row,
                                           num_segments=num_nodes)
        deg_inv_sqrt = tf.pow(deg, self.rate)
        deg_inv_sqrt = tf.where(tf.math.is_inf(deg_inv_sqrt),
                                tf.zeros_like(deg_inv_sqrt), deg_inv_sqrt)
        deg_inv_sqrt = tf.where(tf.math.is_nan(deg_inv_sqrt),
                                tf.zeros_like(deg_inv_sqrt), deg_inv_sqrt)

        normed_edge_weight = tf.gather(
            deg_inv_sqrt, row) * edge_weight * tf.gather(deg_inv_sqrt, col)

        return edge_index, normed_edge_weight
Example #17
def edge_to_sparse_adj(edge: np.ndarray,
                       edge_weight: Optional[np.ndarray] = None,
                       shape: Optional[tuple] = None) -> sp.csr_matrix:
    """Convert (edge, edge_weight) representation to a Scipy sparse matrix

    Parameters
    ----------
    edge : np.ndarray
        edge index of sparse matrix, shape [2, M]
    edge_weight : Optional[np.ndarray], optional
        edge weight of sparse matrix, shape [M,], by default None
    shape : Optional[tuple], optional
        shape of sparse matrix, by default None

    Returns
    -------
    scipy.sparse.csr_matrix

    """

    edge = edge_transpose(edge)

    if edge_weight is None:
        edge_weight = np.ones(edge.shape[1], dtype=gg.floatx())

    if shape is None:
        shape = maybe_shape(edge)
    return sp.csr_matrix((edge_weight, edge), shape=shape)
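A minimal sketch, assuming `gg.floatx()` returns 'float32':

edge = np.array([[0, 1], [1, 0]])  # shape [2, M]: one undirected edge
adj = edge_to_sparse_adj(edge)     # 2 x 2 scipy.sparse.csr_matrix
adj.toarray()                      # array([[0., 1.], [1., 0.]], dtype=float32)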
Example #18
def add_selfloops_edge(edge_index, edge_weight, n_nodes=None, fill_weight=1.0):
    edge_index = edge_transpose(edge_index)

    if n_nodes is None:
        n_nodes = edge_index.max() + 1

    if edge_weight is None:
        edge_weight = np.ones(edge_index.shape[1], dtype=floatx())

    diagonal_edge_index = np.asarray(np.diag_indices(n_nodes)).astype(
        edge_index.dtype, copy=False)

    updated_edge_index = np.hstack([edge_index, diagonal_edge_index])

    diagonal_edge_weight = np.zeros(n_nodes, dtype=floatx()) + fill_weight
    updated_edge_weight = np.hstack([edge_weight, diagonal_edge_weight])

    return updated_edge_index, updated_edge_weight
Example #19
    def __init__(self,
                 in_features,
                 out_features,
                 alpha=0.1,
                 K=10,
                 ppr_dropout=0.,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=True,
                 approximated=True):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(),
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=approximated,
                    name='adj_matrix')

        h = x
        for hid, act in zip(hids, acts):
            h = Dense(hid,
                      use_bias=bias,
                      activation=act,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)

            h = Dropout(rate=dropout)(h)

        h = Dense(out_features,
                  use_bias=bias,
                  kernel_regularizer=regularizers.l2(weight_decay))(h)
        if approximated:
            h = APPNPropagation(alpha=alpha, K=K,
                                dropout=ppr_dropout)([h, adj])
        else:
            h = PPNPropagation(dropout=ppr_dropout)([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #20
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[16],
                 n_heads=[8],
                 activations=['elu'],
                 dropout=0.6,
                 l2_norm=5e-4,
                 lr=0.01,
                 use_bias=True):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='attr_matrix')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, n_head, activation in zip(hiddens, n_heads, activations):
            h = GraphAttention(
                hidden,
                attn_heads=n_head,
                reduction='concat',
                use_bias=use_bias,
                activation=activation,
                kernel_regularizer=regularizers.l2(l2_norm),
                attn_kernel_regularizer=regularizers.l2(l2_norm),
            )([h, adj])
            h = Dropout(rate=dropout)(h)

        h = GraphAttention(out_channels,
                           use_bias=use_bias,
                           attn_heads=1,
                           reduction='average')([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #21
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 num_attn=3,
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(),
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')

        h = x
        for hid, act in zip(hids, acts):
            h = Dense(hid,
                      use_bias=bias,
                      activation=act,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)
            h = Dropout(rate=dropout)(h)
        # for Cora dataset, the first propagation layer is non-trainable
        # and beta is fixed at 0
        h = SimilarityAttention(trainable=False,
                                regularizer=regularizers.l2(weight_decay))(
                                    [h, adj])
        for _ in range(1, num_attn):
            h = SimilarityAttention(regularizer=regularizers.l2(weight_decay))(
                [h, adj])

        h = Dense(out_features, use_bias=bias)(h)
        h = Dropout(rate=dropout)(h)

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #22
def normalize_edge(edge_index, edge_weight=None, rate=-0.5, fill_weight=1.0):
    edge_index = asedge(edge_index)

    num_nodes = edge_index.max() + 1

    if edge_weight is None:
        edge_weight = np.ones(edge_index.shape[1], dtype=gg.floatx())

    if fill_weight:
        edge_index, edge_weight = add_selfloops_edge(edge_index,
                                                     edge_weight,
                                                     num_nodes=num_nodes,
                                                     fill_weight=fill_weight)

    degree = np.bincount(edge_index[0], weights=edge_weight)
    degree_power = np.power(degree, rate, dtype=gg.floatx())
    row, col = edge_index
    edge_weight_norm = degree_power[row] * edge_weight * degree_power[col]

    return edge_index, edge_weight_norm
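A quick numerical check (assuming `asedge` returns a [2, M] edge array and a compatible `add_selfloops_edge` is in scope):

edge_index = np.array([[0, 1], [1, 0]])  # one undirected edge, shape [2, M]
edge_index, edge_weight = normalize_edge(edge_index)
# With self-loops both degrees are 2, so each of the four resulting
# edges carries weight (1/sqrt(2)) * 1 * (1/sqrt(2)) = 0.5.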
Example #23
def infer_type(x)->str:
    """Infer type of the input `x`.

     Parameters:
    ----------
    x: tf.Tensor, tf.Variable, Scipy sparse matrix,
        Numpy array-like, etc.

    Returns:
    ----------
    dtype: string, the converted type of `x`:
        1. `graphgallery.floatx()` if `x` is floating
        2. `graphgallery.intx()` if `x` is integer
        3. `'bool'` if `x` is bool.

    """

    # For tensor or variable
    if is_tf_tensor(x):
        if x.dtype.is_floating:
            return floatx()
        elif x.dtype.is_integer or x.dtype.is_unsigned:
            return intx()
        elif x.dtype.is_bool:
            return 'bool'
        else:
            raise RuntimeError(f'Invalid input of `{type(x)}`')

    if not hasattr(x, 'dtype'):
        x = np.asarray(x)

    if x.dtype.kind in {'f', 'c'}:
        return floatx()
    elif x.dtype.kind in {'i', 'u'}:
        return intx()
    elif x.dtype.kind == 'b':
        return 'bool'
    elif x.dtype.kind == 'O':
        raise RuntimeError(f'Invalid inputs of `{x}`.')
    else:
        raise RuntimeError(f'Invalid input of `{type(x)}`')
Example #24
    def __init__(self, in_channels, out_channels,
                 hiddens=[64],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, kl=5e-4, gamma=1.,
                 use_bias=False):

        _floatx = floatx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=_floatx, name='node_attr')
        adj = [Input(batch_shape=[None, None], dtype=_floatx,
                     sparse=True, name='adj_matrix_1'),
               Input(batch_shape=[None, None], dtype=_floatx, sparse=True,
                     name='adj_matrix_2')]
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        if hiddens:
            mean, var = GaussionConvolution_F(hiddens[0], gamma=gamma,
                                              use_bias=use_bias,
                                              activation=activations[0],
                                              kernel_regularizer=regularizers.l2(weight_decay))([h, *adj])
            if kl:
                KL_divergence = 0.5 * tf.reduce_mean(
                    tf.math.square(mean) + var -
                    tf.math.log(1e-8 + var) - 1, axis=1)
                KL_divergence = tf.reduce_sum(KL_divergence)

                # KL loss
                kl_loss = kl * KL_divergence

        # additional layers (usually unnecessary)
        for hidden, activation in zip(hiddens[1:], activations[1:]):

            mean, var = GaussionConvolution_D(
                hidden, gamma=gamma, use_bias=use_bias, activation=activation)([mean, var, *adj])
            mean = Dropout(rate=dropout)(mean)
            var = Dropout(rate=dropout)(var)

        mean, var = GaussionConvolution_D(
            out_channels, gamma=gamma, use_bias=use_bias)([mean, var, *adj])

        h = Sample()([mean, var])
        h = Gather()([h, index])

        super().__init__(inputs=[x, *adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])

        if hiddens and kl:
            self.add_loss(kl_loss)
Example #25
def sparse_edges_to_sparse_tensor(edge_index: np.ndarray, edge_weight: np.ndarray = None, shape: tuple = None):
    """
    edge_index: shape [2, M]
    edge_weight: shape [M,]
    """
    if edge_weight is None:
        edge_weight = tf.ones(edge_index.shape[1], dtype=floatx())

    if shape is None:
        N = np.max(edge_index) + 1
        shape = (N, N)

    return tf.SparseTensor(edge_index.T, edge_weight, shape)
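A minimal sketch (note that `tf.SparseTensor` expects int64 indices, which NumPy provides by default on most platforms):

edge_index = np.array([[0, 1], [1, 0]])          # shape [2, M]
adj = sparse_edges_to_sparse_tensor(edge_index)  # 2 x 2 tf.SparseTensor
tf.sparse.to_dense(adj)                          # [[0., 1.], [1., 0.]]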
Example #26
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 K=2,
                 bias=False):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(),
                  name='node_attr')
        adj = [
            Input(batch_shape=[None, None],
                  dtype=floatx(),
                  sparse=True,
                  name=f'adj_matrix_{i}') for i in range(K + 1)
        ]

        h = x
        for hid, act in zip(hids, acts):
            h = ChebConv(
                hid,
                K=K,
                use_bias=bias,
                activation=act,
                kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
            h = Dropout(rate=dropout)(h)

        h = ChebConv(out_features, K=K, use_bias=bias)([h, adj])

        super().__init__(inputs=[x, *adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #27
def astensor(x, *, dtype=None, device=None, escape=None):
    """Convert `x` to a tensor of the current backend, inferring `dtype` from `x` when not given."""

    try:
        if x is None or (escape is not None and isinstance(x, escape)):
            return x
    except TypeError:
        raise TypeError(f"argument 'escape' must be a type or tuple of types.")

    if dtype is None:
        dtype = gf.infer_type(x)

    if isinstance(dtype, (np.dtype, str)):
        dtype = data_type_dict().get(str(dtype), dtype)
    elif not isinstance(dtype, torch.dtype):
        raise TypeError(
            f"argument 'dtype' must be torch.dtype, np.dtype or str, but got {type(dtype)}."
        )

    if is_tensor(x):
        tensor = x.to(dtype)
    elif gf.is_tensor(x, backend='tensorflow'):
        return astensor(gf.tensoras(x),
                        dtype=dtype,
                        device=device,
                        escape=escape)
    elif sp.isspmatrix(x):
        if gg.backend() == "dgl_torch":
            import dgl
            tensor = dgl.from_scipy(x, idtype=getattr(torch, gg.intx()))
        elif gg.backend() == "pyg":
            edge_index, edge_weight = gf.sparse_adj_to_edge(x)
            return (astensor(edge_index,
                             dtype=gg.intx(),
                             device=device,
                             escape=escape),
                    astensor(edge_weight,
                             dtype=gg.floatx(),
                             device=device,
                             escape=escape))
        else:
            tensor = sparse_adj_to_sparse_tensor(x, dtype=dtype)
    elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),
              gg.is_scalar(x))):
        tensor = torch.tensor(x, dtype=dtype, device=device)
    else:
        raise TypeError(
            f"Invalid type of inputs. Allowed data type (Tensor, SparseTensor, Numpy array, Scipy sparse tensor, None), but got {type(x)}."
        )
    return tensor.to(device)
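A hypothetical sketch of the PyTorch backend path (the `gf`/`gg` helper modules and their dtype mappings are assumed):

t = astensor(np.eye(2))            # float array -> torch tensor with dtype gg.floatx()
idx = astensor([0, 1, 2])          # list of ints -> torch tensor with dtype gg.intx()
adj = astensor(sp.eye(3).tocsr())  # scipy sparse -> backend-specific sparse tensor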
Example #28
    def __init__(self, in_features, out_features,
                 hids=[16],
                 acts=['elu'],
                 dropout=0.5,
                 order=2,
                 iterations=1,
                 weight_decay=5e-5,
                 share_weights=True,
                 lr=0.01, bias=True):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=floatx(),
                    sparse=True, name='adj_matrix')

        h = x
        for hid, act in zip(hids, acts):
            h = ARMAConv(hid, use_bias=bias,
                         activation=act,
                         order=order,
                         iterations=iterations,
                         gcn_activation="elu",
                         share_weights=share_weights,
                         kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
            h = Dropout(rate=dropout)(h)

        h = ARMAConv(out_features,
                     use_bias=bias,
                     order=1,
                     iterations=1,
                     gcn_activation=None,
                     share_weights=share_weights)([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #29
def sparse_edge_to_sparse_tensor(edge_index: np.ndarray,
                                 edge_weight: np.ndarray = None,
                                 shape: tuple = None) -> tf.SparseTensor:
    """
    edge_index: shape [2, M]
    edge_weight: shape [M,]
    """
    edge_index = gf.edge_transpose(edge_index)

    if edge_weight is None:
        edge_weight = tf.ones(edge_index.shape[1], dtype=gg.floatx())

    if shape is None:
        shape = gf.maybe_shape(edge_index)

    return tf.SparseTensor(edge_index.T, edge_weight, shape)
Example #30
def sparse_edge_to_sparse_tensor(edge_index: np.ndarray,
                                 edge_weight: np.ndarray = None,
                                 shape: tuple = None) -> tf.SparseTensor:
    """
    edge_index: shape [M, 2] or [2, M]
    edge_weight: shape [M,]
    """
    edge_index = gf.asedge(edge_index, shape="row_wise")

    if edge_weight is None:
        edge_weight = tf.ones(edge_index.shape[0], dtype=gg.floatx())

    if shape is None:
        shape = gf.maybe_shape(edge_index)

    return tf.SparseTensor(edge_index, edge_weight, shape)
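A minimal sketch; unlike the previous variant, `gf.asedge(..., shape="row_wise")` lets this one accept either a [2, M] or an [M, 2] edge array:

edge_index = np.array([[0, 1], [1, 0]])         # interpreted row-wise as (row, col) pairs
adj = sparse_edge_to_sparse_tensor(edge_index)  # 2 x 2 tf.SparseTensor
tf.sparse.to_dense(adj)                         # [[0., 1.], [1., 0.]]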