def infer_type(x):
    """Infer type of the input `x`.

    Parameters:
    ----------
    x: Any Python object

    Returns:
    ----------
    dtype: string, the converted type of `x`:
        1. `graphgallery.floatx()` if `x` is floating
        2. `graphgallery.intx()` if `x` is integer
        3. `'bool'` if `x` is bool.

    """
    # For tensor or variable
    if is_th_tensor(x):
        if x.dtype.is_floating_point:
            return floatx()
        elif x.dtype == torch.bool:
            return 'bool'
        elif 'int' in str(x.dtype):
            return intx()
        else:
            raise RuntimeError(f'Invalid input of `{type(x)}`')

    elif is_tf_tensor(x):
        if x.dtype.is_floating:
            return floatx()
        elif x.dtype.is_integer or x.dtype.is_unsigned:
            return intx()
        elif x.dtype.is_bool:
            return 'bool'
        else:
            raise RuntimeError(f'Invalid input of `{type(x)}`')

    if not hasattr(x, 'dtype'):
        x = np.asarray(x)

    if x.dtype.kind in {'f', 'c'}:
        return floatx()
    elif x.dtype.kind in {'i', 'u'}:
        return intx()
    elif x.dtype.kind == 'b':
        return 'bool'
    elif x.dtype.kind == 'O':
        raise RuntimeError(f'Invalid inputs of `{x}`.')
    else:
        raise RuntimeError(f'Invalid input of `{type(x)}`')
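A minimal usage sketch of the NumPy branch above (an illustration, assuming the snippet's dependencies — `np`, `floatx`, `intx`, `is_th_tensor`, `is_tf_tensor` — are in scope, and that `floatx()`/`intx()` keep their `'float32'`/`'int32'` defaults):

import numpy as np

# floats (and complex) map to floatx(), integers to intx(), booleans to 'bool'
assert infer_type(np.zeros(3)) == floatx()           # floating -> e.g. 'float32'
assert infer_type([1, 2, 3]) == intx()               # integer  -> e.g. 'int32'
assert infer_type(np.array([True, False])) == 'bool'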
Example #2
def infer_type(x: Any) -> str:
    """Infer the type of the input 'x'.

    Parameters:
    ----------
    x: Any Python object

    Returns:
    ----------
    dtype: string, the proper data type of 'x':
        1. 'graphgallery.floatx()' if 'x' is floating,
        2. 'graphgallery.intx()' if 'x' is integer,
        3. 'graphgallery.boolx()' if 'x' is boolean.
    """
    # For tensor or variable
    if pytorch.is_tensor(x):
        if x.dtype.is_floating_point:
            return gg.floatx()
        elif x.dtype == torch.bool:
            return gg.boolx()
        elif 'int' in str(x.dtype):
            return gg.intx()
        else:
            raise TypeError(f"Invalid type of pytorch Tensor: '{type(x)}'")

    elif tensorflow.is_tensor(x):
        if x.dtype.is_floating:
            return gg.floatx()
        elif x.dtype.is_integer or x.dtype.is_unsigned:
            return gg.intx()
        elif x.dtype.is_bool:
            return gg.boolx()
        else:
            raise TypeError(f"Invalid type of tensorflow Tensor: '{type(x)}'")

    _x = x
    if not hasattr(_x, 'dtype'):
        _x = np.asarray(_x)

    if _x.dtype.kind in {'f', 'c'}:
        return gg.floatx()
    elif _x.dtype.kind in {'i', 'u'}:
        return gg.intx()
    elif _x.dtype.kind == 'b':
        return gg.boolx()
    elif _x.dtype.kind == 'O':
        raise TypeError(f"Invalid inputs of '{x}'.")
    else:
        raise TypeError(f"Invalid input of '{type(x).__name__}'.")
Example #3
def astensor(x, *, dtype=None, device=None, escape=None):

    try:
        if x is None or (escape is not None and isinstance(x, escape)):
            return x
    except TypeError:
        raise TypeError("argument 'escape' must be a type or tuple of types.")

    if dtype is None:
        dtype = gf.infer_type(x)

    if isinstance(dtype, (np.dtype, str)):
        dtype = data_type_dict().get(str(dtype), dtype)
    elif not isinstance(dtype, torch.dtype):
        raise TypeError(
            f"argument 'dtype' must be torch.dtype, np.dtype or str, but got {type(dtype)}."
        )

    if is_tensor(x):
        tensor = x.to(dtype)
    elif gf.is_tensor(x, backend='tensorflow'):
        return astensor(gf.tensoras(x),
                        dtype=dtype,
                        device=device,
                        escape=escape)
    elif sp.isspmatrix(x):
        if gg.backend() == "dgl_torch":
            import dgl
            tensor = dgl.from_scipy(x, idtype=getattr(torch, gg.intx()))
        elif gg.backend() == "pyg":
            edge_index, edge_weight = gf.sparse_adj_to_edge(x)
            return (astensor(edge_index,
                             dtype=gg.intx(),
                             device=device,
                             escape=escape),
                    astensor(edge_weight,
                             dtype=gg.floatx(),
                             device=device,
                             escape=escape))
        else:
            tensor = sparse_adj_to_sparse_tensor(x, dtype=dtype)
    elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),
              gg.is_scalar(x))):
        tensor = torch.tensor(x, dtype=dtype, device=device)
    else:
        raise TypeError(
            f"Invalid type of inputs. Allowed data types (Tensor, SparseTensor, Numpy array, Scipy sparse matrix, None), but got {type(x)}."
        )
    return tensor.to(device)
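A hedged usage sketch for the PyTorch-backend `astensor` above. Assumptions (illustrative, not library-verified): `gg` is `graphgallery`, `gf` is `graphgallery.functional` as the snippet's own `gf.infer_type` suggests, and `gg.set_backend('pytorch')` is how the backend is selected:

import numpy as np
import scipy.sparse as sp
import graphgallery as gg
from graphgallery import functional as gf

gg.set_backend('pytorch')                 # assumed backend switch
dense = gf.astensor(np.eye(3))            # ndarray -> torch tensor, dtype gg.floatx()
sparse = gf.astensor(sp.eye(3).tocsr())   # scipy sparse -> torch sparse tensor
index = gf.astensor([0, 2])               # list -> torch tensor, dtype gg.intx()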
Example #4
    def __init__(self, in_channels, out_channels, 
                 hiddens=[16], 
                 activations=['relu'], 
                 dropout=0.5,
                 l2_norm=5e-4, 
                 lr=0.01, use_bias=False):
        
        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(), name='attr_matrix')
        adj = Input(batch_shape=[None, None], dtype=floatx(), 
                    sparse=True, name='adj_matrix')
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = GraphConvAttribute(hidden, use_bias=use_bias,
                                   activation=activation,
                                   kernel_regularizer=regularizers.l2(l2_norm))([h, adj])

            h = Dropout(rate=dropout)(h)

        h = GraphConvAttribute(out_channels, use_bias=use_bias)([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
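The constructors in this and the following examples all follow the same tf.keras functional-API recipe: declare `Input`s, stack graph layers with `Dropout` in between, gather the rows for the requested node indices, then build and compile the `Model`. A shape-level sketch of that recipe using only stock tf.keras pieces (`Dense` merely stands in for the library's graph layers and `tf.gather` for its `Gather`; nothing below is graphgallery-specific):

import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense, Dropout, Lambda

x = Input(batch_shape=[None, 32], name='attr_matrix')        # node features
index = Input(batch_shape=[None], dtype='int32', name='node_index')

h = Dense(16, activation='relu')(x)                          # hidden layer
h = Dropout(0.5)(h)
h = Dense(7)(h)                                              # logits, 7 classes
out = Lambda(lambda t: tf.gather(t[0], t[1]))([h, index])    # pick target nodes

model = Model(inputs=[x, index], outputs=out)
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              optimizer='adam', metrics=['accuracy'])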
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[32],
                 activations=['relu'],
                 dropout=0.5,
                 l2_norm=5e-4,
                 lr=0.01,
                 use_bias=True,
                 aggregator='mean',
                 output_normalize=False,
                 n_samples=[15, 5]):

        Agg = _AGG.get(aggregator, None)
        if not Agg:
            raise ValueError(
                f"Invalid value of 'aggregator', allowed values {tuple(_AGG.keys())}, but got '{aggregator}'."
            )

        _intx = intx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='attr_matrix')
        nodes = Input(batch_shape=[None], dtype=_intx, name='nodes')
        neighbors = [
            Input(batch_shape=[None], dtype=_intx, name=f'neighbors_{hop}')
            for hop, n_sample in enumerate(n_samples)
        ]

        aggregators = []
        for hidden, activation in zip(hiddens, activations):
            # you can use `GCNAggregator` instead
            aggregators.append(
                Agg(hidden,
                    concat=True,
                    activation=activation,
                    use_bias=use_bias,
                    kernel_regularizer=regularizers.l2(l2_norm)))

        aggregators.append(Agg(out_channels, use_bias=use_bias))

        h = [tf.nn.embedding_lookup(x, node) for node in [nodes, *neighbors]]
        for agg_i, aggregator in enumerate(aggregators):
            attribute_shape = h[0].shape[-1]
            for hop in range(len(n_samples) - agg_i):
                neighbor_shape = [-1, n_samples[hop], attribute_shape]
                h[hop] = aggregator(
                    [h[hop], tf.reshape(h[hop + 1], neighbor_shape)])
                if hop != len(n_samples) - 1:
                    h[hop] = Dropout(rate=dropout)(h[hop])
            h.pop()

        h = h[0]
        if output_normalize:
            h = tf.nn.l2_normalize(h, axis=1)

        super().__init__(inputs=[x, nodes, *neighbors], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #6
    def __init__(self, in_channels, out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, order=2, use_bias=False):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(), name='node_attr')
        adj = [Input(batch_shape=[None, None],
                     dtype=floatx(), sparse=True,
                     name=f'adj_matrix_{i}') for i in range(order + 1)]
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = ChebyConvolution(hidden, order=order, use_bias=use_bias,
                                 activation=activation,
                                 kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
            h = Dropout(rate=dropout)(h)

        h = ChebyConvolution(out_channels,
                             order=order, use_bias=use_bias)([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, *adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
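Example #7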
def neighbor_sampler(adj_matrix: sp.csr_matrix, max_degree: int = 25,
                     selfloop: bool = False):
    adj_matrix = adj_matrix.tocsr(copy=False)
    N = adj_matrix.shape[0]
    neighbors_matrix = N * np.ones((N + 1, max_degree), dtype=intx())
    for nodeid in range(N):
        neighbors = adj_matrix[nodeid].indices

#         if not selfloop:
#             neighbors = np.setdiff1d(neighbors, [nodeid])
#         else:
#             neighbors = np.intersect1d(neighbors, [nodeid])

        size = neighbors.size
        if size == 0:
            continue

        if size > max_degree:
            neighbors = np.random.choice(neighbors, max_degree, replace=False)
        elif size < max_degree:
            neighbors = np.random.choice(neighbors, max_degree, replace=True)

        neighbors_matrix[nodeid] = neighbors

    np.random.shuffle(neighbors_matrix.T)
    return neighbors_matrix
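A toy run of `neighbor_sampler` on a 3-node path graph (assuming `np`, `sp` and graphgallery's `intx` are in scope, as the function requires). Row `i` holds `max_degree` neighbor ids sampled for node `i`, drawn with replacement when the true degree is smaller; the extra last row, filled with `N`, acts as a padding entry for nodes with no neighbors:

import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0, 1, 0],
                              [1, 0, 1],
                              [0, 1, 0]]))
nbrs = neighbor_sampler(adj, max_degree=4)
print(nbrs.shape)   # (4, 4): N + 1 rows, max_degree columns
print(nbrs[0])      # e.g. [1 1 1 1]: node 0's only neighbor, oversampled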
Example #8
def asintarr(x, dtype: str = None):
    """Convert `x` to interger Numpy array.

    Parameters:
    ----------
    x: Tensor, Scipy sparse matrix,
        Numpy array-like, etc.

    Returns:
    ----------
    An integer Numpy array with the given `dtype`, or `graphgallery.intx()` if `dtype` is None.

    """
    if dtype is None:
        dtype = intx()

    if is_tensor(x):
        if x.dtype != dtype:
            kind = backend().kind
            if kind == "T":
                x = tf.cast(x, dtype=dtype)
            else:
                x = x.to(getattr(torch, dtype))
        return x

    if is_interger_scalar(x):
        x = np.asarray([x], dtype=dtype)
    elif is_list_like(x) or isinstance(x, (np.ndarray, np.matrix)):
        x = np.asarray(x, dtype=dtype)
    else:
        raise ValueError(
            f"Invalid input which should be either array-like or integer scalar, but got {type(x)}."
        )
    return x
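A minimal sketch of the Numpy branch (assuming the helper functions the snippet relies on are in scope and the default `intx()` is 'int32'):

import numpy as np

asintarr(7)                 # -> array([7], dtype=int32): scalars are wrapped
asintarr([1.0, 2.0, 3.0])   # -> array([1, 2, 3], dtype=int32): lists are cast
asintarr(np.arange(3))      # -> array([0, 1, 2], dtype=int32)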
Example #9
    def __init__(self, in_features, out_features,
                 hids=[16], acts=['relu'], dropout=0.5,
                 weight_decay=5e-4, lr=0.01, bias=False):

        _intx = intx()
        _floatx = floatx()
        x = Input(batch_shape=[None, in_features],
                  dtype=_floatx, name='node_attr')
        edge_index = Input(batch_shape=[None, 2], dtype=_intx,
                           name='edge_index')
        edge_weight = Input(batch_shape=[None], dtype=_floatx,
                            name='edge_weight')

        h = x
        for hid, act in zip(hids, acts):
            h = GraphEdgeConvolution(hid, use_bias=bias,
                                     activation=act,
                                     kernel_regularizer=regularizers.l2(weight_decay))([h, edge_index, edge_weight])

            h = Dropout(rate=dropout)(h)

        h = GraphEdgeConvolution(out_features, use_bias=bias)(
            [h, edge_index, edge_weight])

        super().__init__(inputs=[x, edge_index, edge_weight], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #10
def infer_type(x) -> str:
    """Infer the type of the input `x`.

    Parameters:
    ----------
    x: tf.Tensor, tf.Variable, Scipy sparse matrix,
        Numpy array-like, etc.

    Returns:
    ----------
    dtype: string, the converted type of `x`:
        1. `graphgallery.floatx()` if `x` is floating
        2. `graphgallery.intx()` if `x` is integer
        3. `'bool'` if `x` is bool.

    """

    # For tensor or variable
    if is_tf_tensor(x):
        if x.dtype.is_floating:
            return floatx()
        elif x.dtype.is_integer or x.dtype.is_unsigned:
            return intx()
        elif x.dtype.is_bool:
            return 'bool'
        else:
            raise RuntimeError(f'Invalid input of `{type(x)}`')

    if not hasattr(x, 'dtype'):
        x = np.asarray(x)

    if x.dtype.kind in {'f', 'c'}:
        return floatx()
    elif x.dtype.kind in {'i', 'u'}:
        return intx()
    elif x.dtype.kind == 'b':
        return 'bool'
    elif x.dtype.kind == 'O':
        raise RuntimeError(f'Invalid inputs of `{x}`.')
    else:
        raise RuntimeError(f'Invalid input of `{type(x)}`')
Example #11
    def __init__(self, in_channels, out_channels,
                 hiddens=[64],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, kl=5e-4, gamma=1.,
                 use_bias=False):

        _floatx = floatx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=_floatx, name='node_attr')
        adj = [Input(batch_shape=[None, None], dtype=_floatx,
                     sparse=True, name='adj_matrix_1'),
               Input(batch_shape=[None, None], dtype=_floatx, sparse=True,
                     name='adj_matrix_2')]
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        if hiddens:
            mean, var = GaussionConvolution_F(hiddens[0], gamma=gamma,
                                              use_bias=use_bias,
                                              activation=activations[0],
                                              kernel_regularizer=regularizers.l2(weight_decay))([h, *adj])
            if kl:
                KL_divergence = 0.5 * \
                    tf.reduce_mean(tf.math.square(mean) + var -
                                   tf.math.log(1e-8 + var) - 1, axis=1)
                KL_divergence = tf.reduce_sum(KL_divergence)

                # KL loss
                kl_loss = kl * KL_divergence

        # additional layers (usually unnecessary)
        for hidden, activation in zip(hiddens[1:], activations[1:]):

            mean, var = GaussionConvolution_D(
                hidden, gamma=gamma, use_bias=use_bias, activation=activation)([mean, var, *adj])
            mean = Dropout(rate=dropout)(mean)
            var = Dropout(rate=dropout)(var)

        mean, var = GaussionConvolution_D(
            out_channels, gamma=gamma, use_bias=use_bias)([mean, var, *adj])

        h = Sample()([mean, var])
        h = Gather()([h, index])

        super().__init__(inputs=[x, *adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])

        if hiddens and kl:
            self.add_loss(kl_loss)
Example #12
    def add_selfloops_edge(edge_index, n_nodes, edge_weight=None, fill_weight=1.0):
        # build the (i, i) pair for every node
        diagonal_edge_index = tf.reshape(
            tf.repeat(tf.range(n_nodes, dtype=intx()), 2), [n_nodes, 2])

        updated_edge_index = tf.concat(
            [edge_index, diagonal_edge_index], axis=0)

        if edge_weight is not None:
            diagonal_edge_weight = tf.cast(
                tf.fill([n_nodes], fill_weight), dtype=floatx())
            updated_edge_weight = tf.concat(
                [edge_weight, diagonal_edge_weight], axis=0)
        else:
            updated_edge_weight = None

        return updated_edge_index, updated_edge_weight
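A toy call of the helper above (assumptions: the method is effectively static, as its missing `self` argument suggests; TensorFlow is imported as `tf`; and `intx()`/`floatx()` keep their defaults):

import tensorflow as tf

edge_index = tf.constant([[0, 1], [1, 2]], dtype=intx())
new_index, new_weight = add_selfloops_edge(edge_index, n_nodes=3)
# new_index gains the diagonal pairs [0, 0], [1, 1], [2, 2];
# new_weight stays None because no edge_weight was supplied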
Example #13
    def __init__(self, *, device="cpu", seed=None, name=None, **kwargs):
        """
        Parameters:
        ----------
        device: string, optional
            The device the model runs on.
        seed: integer scalar, optional
            Used to create a reproducible sequence of tensors
            across multiple calls.
        name: string, optional
            Specified name for the model. (default: :str: `class name`)
        kwargs: other custom keyword arguments.
        """
        # if graph is not None and not isinstance(graph, gg.data.BaseGraph):
        #     raise ValueError(f"Unrecognized graph: {graph}.")

        kwargs.pop("self", None)
        kwargs.pop("__class__", None)

        cfg = gg.CfgNode()
        cfg.merge_from_dict(kwargs)
        cfg.intx = self.intx = gg.intx()
        cfg.floatx = self.floatx = gg.floatx()
        cfg.boolx = self.boolx = gg.boolx()
        cfg.seed = self.seed = seed
        cfg.name = self.name = name or self.__class__.__name__
        cfg.device = device
        _backend = gg.backend()
        cfg.backend = getattr(_backend, "name", None)

        if seed:
            gf.random_seed(seed, _backend)

        self.device = gf.device(device, _backend)
        self.data_device = self.device
        self.backend = _backend

        # data types, default: `float32`, `int32` and `bool`
        self._cache = gf.BunchDict()
        self.transform = gf.BunchDict()

        self._model = None
        self._graph = None
        self.cfg = cfg
        self.setup_cfg()
        self.custom_setup()
Example #14
    def __init__(self, *graph, device="cpu:0", seed=None, name=None, **kwargs):
        """

        Parameters:
        ----------
            graph: Graph or MultiGraph.
            device: string, optional
                The device the model runs on.
            seed: integer scalar, optional
                Used in combination with `tf.random.set_seed` & `np.random.seed`
                & `random.seed` to create a reproducible sequence of tensors
                across multiple calls.
            name: string, optional
                Specified name for the model. (default: :str: `class.__name__`)
            kwargs: other custom keyword parameters.

        """
        graph = parse_graph_inputs(*graph)
        _backend = backend()
        self.backend = _backend
        self.kind = _backend.kind

        raise_if_kwargs(kwargs)

        if seed is not None:
            np.random.seed(seed)
            random.seed(seed)
            if self.kind == "P":
                torch.manual_seed(seed)
                torch.cuda.manual_seed(seed)
                # torch.cuda.manual_seed_all(seed)
            else:
                tf.random.set_seed(seed)

        if name is None:
            name = self.__class__.__name__

        self.seed = seed
        self.name = name
        self.graph = graph.copy()
        self.device = parse_device(device, self.kind)

        # data types, default: `float32` and `int32`
        self.floatx = floatx()
        self.intx = intx()
Example #15
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[64],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-3,
                 lr=0.01,
                 use_bias=False,
                 K=10):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = Dense(hidden,
                      use_bias=use_bias,
                      activation=activation,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)
            h = Dropout(dropout)(h)

        h = Dense(out_channels,
                  use_bias=use_bias,
                  activation=activations[-1],
                  kernel_regularizer=regularizers.l2(weight_decay))(h)
        h = Dropout(dropout)(h)

        h = PropConvolution(
            K,
            use_bias=use_bias,
            activation='sigmoid',
            kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #16
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_nodes,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False):

        _floatx = floatx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=_floatx,
                  name='node_attr')
        wavelet = Input(batch_shape=[num_nodes, num_nodes],
                        dtype=_floatx,
                        sparse=True,
                        name='wavelet_matrix')
        inverse_wavelet = Input(batch_shape=[num_nodes, num_nodes],
                                dtype=_floatx,
                                sparse=True,
                                name='inverse_wavelet_matrix')
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = WaveletConvolution(
                hidden,
                activation=activation,
                use_bias=use_bias,
                kernel_regularizer=regularizers.l2(weight_decay))(
                    [h, wavelet, inverse_wavelet])
            h = Dropout(rate=dropout)(h)

        h = WaveletConvolution(
            out_channels, use_bias=use_bias)([h, wavelet, inverse_wavelet])
        h = Gather()([h, index])

        super().__init__(inputs=[x, wavelet, inverse_wavelet, index],
                         outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
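Example #17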
    def predict(self, predict_data=None, return_prob=True):
        """
        Predict the output probability for the input data.

        Note:
        ----------
        You must compile your model before training/testing/predicting.
            Use `model.build()`.

        Parameters:
        ----------
        predict_data: Numpy 1D array, optional.
            The indices of objects to predict.
            If None, predict all objects.

        return_prob: bool.
            Whether to return the predicted probabilities.

        Return:
        ----------
        The predicted probability of each class for each object;
            for the node classification task it has shape
            (num_nodes, num_node_classes).

        """

        if not self.model:
            raise RuntimeError(
                'You must compile your model before training/testing/predicting. Use `model.build()`.'
            )

        if predict_data is None:
            predict_data = np.arange(self.graph.num_nodes, dtype=gg.intx())

        if not isinstance(predict_data, Sequence):
            predict_data = self.predict_sequence(predict_data)

        self.predict_data = predict_data

        logit = self.predict_step(predict_data)
        if return_prob:
            logit = softmax(logit)
        return logit
Example #18
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 l2_norm=5e-4,
                 lr=0.01,
                 use_bias=False):

        _intx = intx()
        _floatx = floatx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=_floatx,
                  name='attr_matrix')
        edge_index = Input(batch_shape=[None, 2],
                           dtype=_intx,
                           name='edge_index')
        edge_weight = Input(batch_shape=[None],
                            dtype=_floatx,
                            name='edge_weight')
        index = Input(batch_shape=[None], dtype=_intx, name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = GraphEdgeConvolution(
                hidden,
                use_bias=use_bias,
                activation=activation,
                kernel_regularizer=regularizers.l2(l2_norm))(
                    [h, edge_index, edge_weight])

            h = Dropout(rate=dropout)(h)

        h = GraphEdgeConvolution(
            out_channels, use_bias=use_bias)([h, edge_index, edge_weight])
        output = Gather()([h, index])

        super().__init__(inputs=[x, edge_index, edge_weight, index],
                         outputs=output)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #19
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[16],
                 n_heads=[8],
                 activations=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, n_head, activation in zip(hiddens, n_heads, activations):
            h = GraphAttention(
                hidden,
                attn_heads=n_head,
                reduction='concat',
                use_bias=use_bias,
                activation=activation,
                kernel_regularizer=regularizers.l2(weight_decay),
                attn_kernel_regularizer=regularizers.l2(weight_decay),
            )([h, adj])
            h = Dropout(rate=dropout)(h)

        h = GraphAttention(out_channels,
                           use_bias=use_bias,
                           attn_heads=1,
                           reduction='average')([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #20
def astensor(x, *, dtype=None, device=None, escape=None):

    try:
        if x is None or (escape is not None and isinstance(x, escape)):
            return x
    except TypeError:
        raise TypeError("argument 'escape' must be a type or tuple of types.")
    if dtype is None:
        dtype = gf.infer_type(x)
    elif isinstance(dtype, tf.dtypes.DType):
        dtype = dtype.name
    elif isinstance(dtype, (np.dtype, str)):
        dtype = str(dtype)
    else:
        raise TypeError(
            f"argument 'dtype' must be tf.dtypes.DType, np.dtype or str, but got {type(dtype)}."
        )

    with tf.device(device):
        if is_tensor(x):
            if x.dtype != dtype:
                return tf.cast(x, dtype=dtype)
            return tf.identity(x)
        elif gf.is_tensor(x, backend='torch'):
            return astensor(gf.tensoras(x),
                            dtype=dtype,
                            device=device,
                            escape=escape)
        elif sp.isspmatrix(x):
            if gg.backend() == "dgl_tf":
                import dgl
                return dgl.from_scipy(x, idtype=getattr(tf,
                                                        gg.intx())).to(device)
            else:
                return sparse_adj_to_sparse_tensor(x, dtype=dtype)
        elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),
                  gg.is_scalar(x))):
            return tf.convert_to_tensor(x, dtype=dtype)
        else:
            raise TypeError(
                f"Invalid type of inputs. Allowed data types (Tensor, SparseTensor, Numpy array, Scipy sparse matrix, None), but got {type(x)}."
            )
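The TensorFlow-backend counterpart to the sketch after Example #3, under the same assumptions about `gg`, `gf`, and `set_backend` (illustrative entry points, not library-verified):

import scipy.sparse as sp
import graphgallery as gg
from graphgallery import functional as gf

gg.set_backend('tensorflow')              # assumed backend switch
t = gf.astensor([1, 2, 3])                # list -> tf.Tensor, dtype gg.intx()
s = gf.astensor(sp.eye(3).tocsr())        # scipy sparse -> tf.SparseTensor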
Example #21
    def __init__(self, graph, device="cpu", seed=None, name=None, **kwargs):
        """

        Parameters:
        ----------
        graph: Graph or MultiGraph.
        device: string, optional
            The device the model runs on.
        seed: integer scalar, optional
            Used in combination with `tf.random.set_seed` & `np.random.seed`
            & `random.seed` to create a reproducible sequence of tensors
            across multiple calls.
        name: string, optional
            Specified name for the model. (default: :str: `class.__name__`)
        kwargs: other custom keyword arguments.
        """
        if not isinstance(graph, gg.data.BaseGraph):
            raise ValueError(f"Unrecognized graph: {graph}.")

        _backend = gg.backend()

        # It currently takes no keyword arguments
        gg.utils.raise_error.raise_if_kwargs(kwargs)

        if seed:
            gf.random_seed(seed, _backend)

        if name is None:
            name = self.__class__.__name__

        self.seed = seed
        self.name = name
        self.graph = graph.copy()
        self.device = gf.device(device, _backend)
        self.backend = _backend

        # data types, default: `float32`, `int32` and `bool`
        self.floatx = gg.floatx()
        self.intx = gg.intx()
        self.boolx = gg.boolx()
        self._cache = gf.BunchDict()
Example #22
    def predict(self, index=None, return_prob=True):
        """
        Predict the output probability for the input node index.


        Note:
        ----------
        You must compile your model before training/testing/predicting.
            Use `model.build()`.

        Parameters:
        ----------
        index: Numpy 1D array, optional.
            The indices of nodes to predict.
            If None, predict all nodes.

        return_prob: bool.
            Whether to return the predicted probabilities.

        Return:
        ----------
        The predicted probability of each class for each node,
            shape (n_nodes, n_classes).

        """

        if not self.model:
            raise RuntimeError(
                'You must compile your model before training/testing/predicting. Use `model.build()`.'
            )

        if index is None:
            index = np.arange(self.graph.n_nodes, dtype=intx())
        else:
            index = asintarr(index)
        sequence = self.predict_sequence(index)
        logit = self.predict_step(sequence)
        if return_prob:
            logit = softmax(logit)
        return logit
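Example #23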
def asarray(x: Any, dtype: Optional[str] = None) -> np.ndarray:
    """Convert `x` to interger Numpy array.

    Parameters:
    ----------
    x: Tensor, Scipy sparse matrix,
        Numpy array-like, etc.

    Returns:
    ----------
    An integer Numpy array with the given `dtype`, or `graphgallery.intx()` if `dtype` is None.

    """
    if dtype is None:
        dtype = gg.intx()

    if gf.is_tensor(x, backend="tensorflow"):
        if x.dtype != dtype:
            return tf.cast(x, dtype=dtype)
        else:
            return x

    if gf.is_tensor(x, backend="torch"):
        if x.dtype != dtype:
            return x.to(getattr(torch, dtype))
        else:
            return x

    if gg.is_intscalar(x):
        x = np.asarray([x], dtype=dtype)
    elif gg.is_listlike(x) or (isinstance(x, np.ndarray) and x.dtype != "O"):
        x = np.asarray(x, dtype=dtype)
    else:
        raise ValueError(
            f"Invalid input which should be either array-like or integer scalar, but got {type(x)}."
        )
    return x