Code example #1
from tensorflow.keras.layers import Input

from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.transforms import LayerPreprocess


class SGCN:
    """Transform that raises the (normalized) adjacency matrix to the K-th power."""

    def __init__(self, K):
        self.K = K

    def __call__(self, graph):
        out = graph.a
        for _ in range(self.K - 1):
            out = out.dot(graph.a)  # multiply by A once per step, so out ends as A^K
        out.sort_indices()
        graph.a = out
        return graph


# Load data
K = 2  # Propagation steps for SGCN
dataset = Citation("cora", transforms=[LayerPreprocess(GCNConv), SGCN(K)])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Parameters
l2_reg = 5e-6  # L2 regularization rate
learning_rate = 0.2  # Learning rate
epochs = 20000  # Number of training epochs
patience = 200  # Patience for early stopping
a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1

N = dataset.n_nodes  # Number of nodes in the graph
F = dataset.n_node_features  # Original size of node features
n_out = dataset.n_labels  # Number of classes

# Model definition
x_in = Input(shape=(F, ))
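
The listing is cut off right after the first Input. For reference, a minimal sketch of how the model definition could continue, following the usual single-layer SGC pattern (the a_in input, the layer choice, and the compile settings below are assumptions, not part of the original snippet):

from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

# Assumed continuation: one GCNConv layer applied to the precomputed A^K
a_in = Input((N,), sparse=True, dtype=a_dtype)
output = GCNConv(n_out, activation="softmax",
                 kernel_regularizer=l2(l2_reg), use_bias=False)([x_in, a_in])

model = Model(inputs=[x_in, a_in], outputs=output)
model.compile(optimizer=Adam(learning_rate),
              loss="categorical_crossentropy",
              weighted_metrics=["acc"])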
Code example #2
File: citation_gcn.py  Project: zdqf/spektral
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.transforms import LayerPreprocess, AdjToSpTensor

tf.random.set_seed(seed=0)  # make weight initialization reproducible

# Load data
dataset = Citation('cora',
                   transforms=[LayerPreprocess(GCNConv),
                               AdjToSpTensor()])


# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    return mask / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (mask_to_weights(mask)
                                      for mask in (dataset.mask_tr,
                                                   dataset.mask_va,
                                                   dataset.mask_te))
Code example #3
"""
This example implements the experiments on citation networks from the paper:

Graph Neural Networks with convolutional ARMA filters (https://arxiv.org/abs/1901.01343)
Filippo Maria Bianchi, Daniele Grattarola, Cesare Alippi, Lorenzo Livi
"""

from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import ARMAConv
from spektral.transforms import LayerPreprocess

# Load data
dataset = Citation("cora", transforms=[LayerPreprocess(ARMAConv)])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Parameters
channels = 16  # Number of channels in the first layer
iterations = 1  # Number of iterations to approximate each ARMA(1)
order = 2  # Order of the ARMA filter (number of parallel stacks)
share_weights = True  # Share weights in each ARMA stack
dropout_skip = 0.75  # Dropout rate for the internal skip connection of ARMA
dropout = 0.5  # Dropout rate for the features
l2_reg = 5e-5  # L2 regularization rate
learning_rate = 1e-2  # Learning rate
epochs = 20000  # Number of training epochs
patience = 100  # Patience for early stopping
a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1
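
The listing ends with the hyperparameters. A sketch of the two-layer ARMA model they would configure (assumed; this mirrors ARMAConv's documented arguments rather than the missing part of the file):

from tensorflow.keras.losses import CategoricalCrossentropy

N = dataset.n_nodes
F = dataset.n_node_features

x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True, dtype=a_dtype)

x = ARMAConv(channels,
             iterations=iterations,
             order=order,
             share_weights=share_weights,
             dropout_rate=dropout_skip,
             activation="elu",
             gcn_activation="elu",
             kernel_regularizer=l2(l2_reg))([x_in, a_in])
x = Dropout(dropout)(x)
out = ARMAConv(dataset.n_labels,
               iterations=1,
               order=1,
               share_weights=share_weights,
               dropout_rate=dropout_skip,
               activation="softmax",
               gcn_activation=None,
               kernel_regularizer=l2(l2_reg))([x, a_in])

model = Model(inputs=[x_in, a_in], outputs=out)
model.compile(optimizer=Adam(learning_rate),
              loss=CategoricalCrossentropy(reduction="sum"),
              weighted_metrics=["acc"])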
Code example #4
File: citation_gcn.py  Project: zhuyuecai/spektral
import numpy as np
import tensorflow as tf

from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.models.gcn import GCN
from spektral.transforms import AdjToSpTensor, LayerPreprocess

learning_rate = 1e-2
seed = 0
epochs = 200
patience = 10
data = "cora"

tf.random.set_seed(seed=seed)  # make weight initialization reproducible

# Load data
dataset = Citation(data,
                   normalize_x=True,
                   transforms=[LayerPreprocess(GCNConv),
                               AdjToSpTensor()])


# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    return mask.astype(np.float32) / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (mask_to_weights(mask)
                                      for mask in (dataset.mask_tr,
                                                   dataset.mask_va,
                                                   dataset.mask_te))
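
Given the spektral.models.gcn.GCN import, this variant presumably uses the ready-made model class instead of hand-built layers. A plausible continuation (assumed, not recovered from the original file):

from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers import Adam

from spektral.data.loaders import SingleLoader

model = GCN(n_labels=dataset.n_labels, n_input_channels=dataset.n_node_features)
model.compile(optimizer=Adam(learning_rate),
              loss=CategoricalCrossentropy(reduction="sum"),
              weighted_metrics=["acc"])

loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)
model.fit(loader_tr.load(),
          steps_per_epoch=loader_tr.steps_per_epoch,
          validation_data=loader_va.load(),
          validation_steps=loader_va.steps_per_epoch,
          epochs=epochs,
          callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)])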
Code example #5
import numpy as np
import tensorflow as tf

from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.models.gcn import GCN
from spektral.transforms import LayerPreprocess

learning_rate = 1e-2
seed = 0
epochs = 200
patience = 10
data = "cora"

tf.random.set_seed(seed=seed)  # make weight initialization reproducible

# Load data
dataset = Citation(data,
                   normalize_x=True,
                   transforms=[LayerPreprocess(GCNConv)])


# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    return mask.astype(np.float32) / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (mask_to_weights(mask)
                                      for mask in (dataset.mask_tr,
                                                   dataset.mask_va,
                                                   dataset.mask_te))
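
This listing is identical to example #4 except that AdjToSpTensor is dropped from the transforms, so the adjacency matrix stays a scipy sparse matrix until the loader hands it to the model; training would proceed exactly as in the sketch after example #4.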
Code example #6
import numpy as np
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.random import set_seed

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import GATConv
from spektral.transforms import LayerPreprocess

set_seed(0)

# Load data
dataset = Citation("cora",
                   normalize_x=True,
                   transforms=[LayerPreprocess(GATConv)])


def mask_to_weights(mask):
    return mask.astype(np.float32) / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (mask_to_weights(mask)
                                      for mask in (dataset.mask_tr,
                                                   dataset.mask_va,
                                                   dataset.mask_te))

# Parameters
channels = 8  # Number of channels in each head of the first GAT layer
n_attn_heads = 8  # Number of attention heads in first GAT layer
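
The parameter list is cut off mid-way. The remaining hyperparameters and the two-layer GAT they would configure might look like this (assumed; the values below are the usual ones for GAT on Cora, not recovered from the file):

from tensorflow.keras.layers import Dropout, Input

# Assumed remaining hyperparameters:
dropout = 0.6  # Dropout rate for the features and attention coefficients
l2_reg = 2.5e-4  # L2 regularization rate
learning_rate = 5e-3

N = dataset.n_nodes
F = dataset.n_node_features

x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True)

x = Dropout(dropout)(x_in)
x = GATConv(channels,
            attn_heads=n_attn_heads,
            concat_heads=True,
            dropout_rate=dropout,
            activation="elu",
            kernel_regularizer=l2(l2_reg),
            attn_kernel_regularizer=l2(l2_reg))([x, a_in])
x = Dropout(dropout)(x)
out = GATConv(dataset.n_labels,
              attn_heads=1,
              concat_heads=False,
              dropout_rate=dropout,
              activation="softmax")([x, a_in])

model = Model(inputs=[x_in, a_in], outputs=out)
model.compile(optimizer=Adam(learning_rate),
              loss=CategoricalCrossentropy(reduction="sum"),
              weighted_metrics=["acc"])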
Code example #7
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import ChebConv
from spektral.transforms import AdjToSpTensor, LayerPreprocess

# Load data
dataset = Citation("cora",
                   transforms=[LayerPreprocess(ChebConv),
                               AdjToSpTensor()])


# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    return mask / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (mask_to_weights(mask)
                                      for mask in (dataset.mask_tr,
                                                   dataset.mask_va,
                                                   dataset.mask_te))
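
A sketch of the Chebyshev-filter model this setup would feed (assumed; channels, K, and the rates below are illustrative defaults, not recovered from the file):

# Assumed hyperparameters:
channels = 16  # Number of channels in the first layer
K = 2  # Max degree of the Chebyshev polynomials
dropout = 0.5  # Dropout rate for the features
l2_reg = 2.5e-4  # L2 regularization rate

N = dataset.n_nodes
F = dataset.n_node_features

x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True)

x = Dropout(dropout)(x_in)
x = ChebConv(channels, K=K, activation="relu",
             kernel_regularizer=l2(l2_reg))([x, a_in])
x = Dropout(dropout)(x)
out = ChebConv(dataset.n_labels, K=K, activation="softmax")([x, a_in])

model = Model(inputs=[x_in, a_in], outputs=out)
model.compile(optimizer=Adam(1e-2),
              loss=CategoricalCrossentropy(reduction="sum"),
              weighted_metrics=["acc"])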
Code example #8
from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.transforms import AdjToSpTensor, LayerPreprocess


class SGCN:
    """Transform that raises the (normalized) adjacency matrix to the K-th power."""

    def __init__(self, K):
        self.K = K

    def __call__(self, graph):
        out = graph.a
        for _ in range(self.K - 1):
            out = out.dot(graph.a)  # multiply by A once per step, so out ends as A^K
        out.sort_indices()
        graph.a = out
        return graph


# Load data
K = 2  # Propagation steps for SGCN
dataset = Citation(
    "cora", transforms=[LayerPreprocess(GCNConv),
                        SGCN(K),
                        AdjToSpTensor()])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Parameters
l2_reg = 5e-6  # L2 regularization rate
learning_rate = 0.2  # Learning rate
epochs = 20000  # Number of training epochs
patience = 200  # Patience for early stopping
a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1

N = dataset.n_nodes  # Number of nodes in the graph
F = dataset.n_node_features  # Original size of node features
n_out = dataset.n_labels  # Number of classes

# Model definition
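
The listing stops at the model-definition comment. The model would be the same single-layer SGC as in example #1; a sketch of the definition plus the training loop that typically follows (assumed, everything below is illustrative):

from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader

x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True, dtype=a_dtype)
out = GCNConv(n_out, activation="softmax",
              kernel_regularizer=l2(l2_reg), use_bias=False)([x_in, a_in])
model = Model(inputs=[x_in, a_in], outputs=out)
model.compile(optimizer=Adam(learning_rate),
              loss="categorical_crossentropy",
              weighted_metrics=["acc"])

# Normalize the boolean masks into per-node sample weights, as in the other examples
weights_tr, weights_va, weights_te = (
    m.astype("float32") / m.sum() for m in (mask_tr, mask_va, mask_te))

loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)
model.fit(loader_tr.load(),
          steps_per_epoch=loader_tr.steps_per_epoch,
          validation_data=loader_va.load(),
          validation_steps=loader_va.steps_per_epoch,
          epochs=epochs,
          callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)])

loader_te = SingleLoader(dataset, sample_weights=weights_te)
model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)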
Code example #9
from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.transforms import AdjToSpTensor, LayerPreprocess


class SGCN:
    """Transform that raises the (normalized) adjacency matrix to the K-th power."""

    def __init__(self, K):
        self.K = K

    def __call__(self, graph):
        out = graph.a
        for _ in range(self.K - 1):
            out = out.dot(graph.a)  # multiply by A once per step, so out ends as A^K
        out.sort_indices()
        graph.a = out
        return graph


# Load data
K = 2  # Propagation steps for SGCN
dataset = Citation(
    param_data(),
    transforms=[LayerPreprocess(GCNConv),
                SGCN(K), AdjToSpTensor()])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Parameters
l2_reg = 5e-6  # L2 regularization rate
learning_rate = param_learn()  # Learning rate
epochs = param_epoch()  # Number of training epochs
patience = param_pat()  # Patience for early stopping

a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1
N = dataset.n_nodes  # Number of nodes in the graph
F = dataset.n_node_features  # Original size of node features
n_out = dataset.n_labels  # Number of classes
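
The param_data(), param_learn(), param_epoch(), and param_pat() calls are helpers defined elsewhere in the originating project (the fragment reads like a parametrized test). Purely to illustrate the shape such helpers could take, hypothetical stand-ins:

# Hypothetical stand-ins (not the project's actual code) so the fragment
# can run as a quick smoke test:
def param_data():
    return "cora"


def param_learn():
    return 0.2


def param_epoch():
    return 50  # keep the smoke test short


def param_pat():
    return 10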