Example #1
class GraphModel(Model, ABC):
    transforms = [LayerPreprocess(GCNConv), AdjToSpTensor()]

    def get_network(self, params, n_inputs, n_outputs):
        return GCN(n_labels=n_outputs,
                   channels=params.channels,
                   n_input_channels=n_inputs,
                   output_activation=self.output_activation,
                   l2_reg=params.l2_loss_coefficient)

    def fit_network(self, params, dataset):
        # weights_va, weights_te = (
        #     utils.mask_to_weights(mask).astype(np.float32)
        #     for mask in (dataset.mask_va, dataset.mask_te)
        # )
        weights_tr, weights_va = [
            utils.weight_by_class(dataset[0].y, mask)
            for mask in [dataset.mask_tr, dataset.mask_va]
        ]

        loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
        loader_va = SingleLoader(dataset, sample_weights=weights_va)
        history = self.network.fit(
            loader_tr.load(),
            steps_per_epoch=loader_tr.steps_per_epoch,
            validation_data=loader_va.load(),
            validation_steps=loader_va.steps_per_epoch,
            epochs=params.epochs,
            callbacks=[
                tf.keras.callbacks.EarlyStopping(monitor="val_loss",
                                                 patience=params.patience,
                                                 restore_best_weights=True),
                tf.keras.callbacks.ModelCheckpoint(os.path.join(
                    params.directory, self.__name__ + ".h5"),
                                                   monitor="val_loss",
                                                   save_best_only=True,
                                                   save_weights_only=True)
            ])
        return history
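For completeness, here is a hedged sketch of a matching test-step method for this class, reusing the SingleLoader / sample-weight pattern from fit_network; the method name evaluate_network and its placement are assumptions, not part of the original excerpt.

    def evaluate_network(self, dataset):
        # Hypothetical companion method (not from the original code):
        # weight the test nodes the same way as the train/validation nodes
        weights_te = utils.weight_by_class(dataset[0].y, dataset.mask_te)
        loader_te = SingleLoader(dataset, sample_weights=weights_te)
        return self.network.evaluate(loader_te.load(),
                                      steps=loader_te.steps_per_epoch)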
Example #2
import numpy as np
import tensorflow as tf
from ogb.nodeproppred import Evaluator, NodePropPredDataset
from tensorflow.keras.layers import BatchNormalization, Dropout, Input
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

from spektral.datasets.ogb import OGB
from spektral.layers import GCNConv
from spektral.transforms import AdjToSpTensor, GCNFilter

# Load data
dataset_name = "ogbn-arxiv"
ogb_dataset = NodePropPredDataset(dataset_name)
dataset = OGB(ogb_dataset, transforms=[GCNFilter(), AdjToSpTensor()])
graph = dataset[0]
x, adj, y = graph.x, graph.a, graph.y

# Parameters
channels = 256  # Number of channels for GCN layers
dropout = 0.5  # Dropout rate for the features
learning_rate = 1e-2  # Learning rate
epochs = 200  # Number of training epochs

N = dataset.n_nodes  # Number of nodes in the graph
F = dataset.n_node_features  # Original size of node features
n_out = ogb_dataset.num_classes  # OGB labels are sparse indices

# Data splits
idx = ogb_dataset.get_idx_split()
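The excerpt stops right after fetching the split; the following is a minimal sketch, assuming the standard OGB split dictionary with "train"/"valid"/"test" keys, of how those index arrays are commonly turned into boolean node masks (the mask_* names below are our own, not from the original file).

idx_tr, idx_va, idx_te = idx["train"], idx["valid"], idx["test"]
mask_tr = np.zeros(N, dtype=bool)
mask_va = np.zeros(N, dtype=bool)
mask_te = np.zeros(N, dtype=bool)
mask_tr[idx_tr] = True  # mark training nodes
mask_va[idx_va] = True  # mark validation nodes
mask_te[idx_te] = True  # mark test nodes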
Example #3
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.transforms import LayerPreprocess, AdjToSpTensor

tf.random.set_seed(seed=0)  # make weight initialization reproducible

# Load data
dataset = Citation('cora',
                   transforms=[LayerPreprocess(GCNConv),
                               AdjToSpTensor()])


# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    return mask / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (mask_to_weights(mask)
                                      for mask in (dataset.mask_tr,
                                                   dataset.mask_va,
                                                   dataset.mask_te))

# Parameters
Example #4
        for circ in circs:
            A, X, labels = load_data(circ, '../data/output', normalize="")
#             if sum(labels) >= 500:
            print(f"{circ}: {sum(labels)}, {len(labels)}")
            circuits.append(Graph(x=X.toarray(), a=A, y=labels))

        return circuits

def normalize_feature(circ_dataset):
    # Fit the scaler on the node features of all graphs, then rescale
    # each graph's features to [0, 1] in place
    scaler = MinMaxScaler()
    scaler.fit(np.vstack([graph.x for graph in circ_dataset]))
    for graph in circ_dataset:
        graph.x = scaler.transform(graph.x)
    return circ_dataset

dataset = normalize_feature(CircuitDataset(transforms=[AdjToSpTensor()]))

# Parameters

F = dataset.n_node_features  # Dimension of node features
n_out = dataset.n_labels  # Dimension of the target

# Train/valid/test split
np.random.seed(42)
idxs = np.random.permutation(len(dataset))
split_va, split_te = int(0.8 * len(dataset)), int(0.9 * len(dataset))
idx_tr, idx_va, idx_te = np.split(idxs, [split_va, split_te])
print(idx_tr, idx_va, idx_te)
dataset_tr = dataset[idx_tr]
dataset_va = dataset[idx_va]
dataset_te = dataset[idx_te]
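With the dataset split into several graphs per subset, a loader is needed to batch them for training; below is a hedged sketch assuming Spektral's DisjointLoader API (batch_size is an arbitrary illustrative value, and node_level=True reflects the per-node labels built above).

from spektral.data import DisjointLoader

batch_size = 8  # illustrative value, not from the original script
loader_tr = DisjointLoader(dataset_tr, node_level=True, batch_size=batch_size)
loader_va = DisjointLoader(dataset_va, node_level=True, batch_size=batch_size)
loader_te = DisjointLoader(dataset_te, node_level=True, batch_size=batch_size,
                           shuffle=False)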
Example #5
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import ChebConv
from spektral.transforms import LayerPreprocess, AdjToSpTensor

# Load data
dataset = Citation('cora',
                   transforms=[LayerPreprocess(ChebConv), AdjToSpTensor()])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Parameters
channels = 16          # Number of channels in the first layer
K = 2                  # Max degree of the Chebyshev polynomials
dropout = 0.5          # Dropout rate for the features
l2_reg = 5e-4 / 2      # L2 regularization rate
learning_rate = 1e-2   # Learning rate
epochs = 200           # Number of training epochs
patience = 10          # Patience for early stopping
a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1

N = dataset.n_nodes          # Number of nodes in the graph
F = dataset.n_node_features  # Original size of node features
n_out = dataset.n_labels     # Number of classes
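The excerpt ends before the network itself; the following is a minimal sketch of a two-layer ChebConv model wired to the parameters above, in the spirit of Spektral's citation examples (the exact architecture is an assumption, not taken from the original file).

x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True, dtype=a_dtype)

x_1 = Dropout(dropout)(x_in)
x_1 = ChebConv(channels, K=K, activation="relu",
               kernel_regularizer=l2(l2_reg))([x_1, a_in])
x_2 = Dropout(dropout)(x_1)
x_2 = ChebConv(n_out, K=K, activation="softmax")([x_2, a_in])

model = Model(inputs=[x_in, a_in], outputs=x_2)
model.compile(optimizer=Adam(learning_rate),
              loss="categorical_crossentropy",
              weighted_metrics=["acc"])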
Example #6
    weighted_metrics=["acc"],
)

# Train model
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)

model.fit(
    loader_tr.load(),
    steps_per_epoch=loader_tr.steps_per_epoch,
    validation_data=loader_va.load(),
    validation_steps=loader_va.steps_per_epoch,
    epochs=epochs,
    callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)

# Set up explainer
dataset.apply(AdjToSpTensor())
x_exp, a_exp = dataset[0].x, dataset[0].a
explainer = GNNExplainer(model, preprocess=gcn_filter, verbose=True)

# Explain prediction for one node
node_idx = 1000
adj_mask, feat_mask = explainer.explain_node(x=x_exp,
                                             a=a_exp,
                                             node_idx=node_idx)

# Plot the result
G = explainer.plot_subgraph(adj_mask, feat_mask, node_idx)
plt.show()
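Beyond the plot, the learned masks can be inspected directly; a small add-on sketch, assuming the masks come back as TensorFlow tensors, ranks the input features by the magnitude of their mask values (the top-10 cutoff is arbitrary).

import numpy as np

feat_importance = np.abs(feat_mask.numpy()).ravel()
top_features = np.argsort(-feat_importance)[:10]
print("Most influential feature indices:", top_features)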
Example #7
    def __call__(self, graph):
        # Pre-compute the K-th power of the adjacency matrix so that a
        # single linear layer performs K propagation steps (SGC)
        out = graph.a
        for _ in range(self.K - 1):
            out = out.dot(graph.a)
        out.sort_indices()
        graph.a = out
        return graph


# Load data
K = 2  # Propagation steps for SGCN
dataset = Citation(
    param_data(),
    transforms=[LayerPreprocess(GCNConv),
                SGCN(K), AdjToSpTensor()])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Parameters
l2_reg = 5e-6  # L2 regularization rate
learning_rate = param_learn()  # Learning rate
epochs = param_epoch()  # Number of training epochs
patience = param_pat()  # Patience for early stopping

a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1
N = dataset.n_nodes  # Number of nodes in the graph
F = dataset.n_node_features  # Original size of node features
n_out = dataset.n_labels  # Number of classes


# Model definition
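The excerpt stops at the model-definition comment; what follows is a minimal single-layer sketch in the SGC spirit, with assumed imports and layer choice (none of it is taken from the original file): since the K-th power of the adjacency matrix is pre-computed by the SGCN transform, one linear GCNConv layer suffices.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from spektral.layers import GCNConv

x_in = Input(shape=(F,))
a_in = Input((N,), sparse=True, dtype=a_dtype)
output = GCNConv(n_out, activation="softmax", use_bias=False,
                 kernel_regularizer=l2(l2_reg))([x_in, a_in])
model = Model(inputs=[x_in, a_in], outputs=output)
model.compile(optimizer=Adam(learning_rate),
              loss="categorical_crossentropy",
              weighted_metrics=["acc"])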
Example #8
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.datasets.citation import Cora
from spektral.layers import GATConv
from spektral.transforms import LayerPreprocess, AdjToSpTensor
from spektral.utils import tic, toc

# Load data
dataset = Cora(transforms=[LayerPreprocess(GATConv), AdjToSpTensor()])
graph = dataset[0]
x, a, y = graph.x, graph.a, graph.y
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Define model
x_in = Input(shape=(dataset.n_node_features, ))
a_in = Input(shape=(None, ), sparse=True)
x_1 = Dropout(0.6)(x_in)
x_1 = GATConv(8,
              attn_heads=8,
              concat_heads=True,
              dropout_rate=0.6,
              activation='elu',
              kernel_regularizer=l2(5e-4),
              attn_kernel_regularizer=l2(5e-4),