def make_splits(graphs_pkl):
    # Generate train, val and test splits
    splits = dict.fromkeys(["train", "val", "test"])
    for split_name in splits:
        splits[split_name] = ProteinDataset(
            graphs_pkl,
            mask_func=mask_generator(split_name, 42, 0.70, 0.15),
            transforms=[LayerPreprocess(GATConv)],  # , AdjToSpTensor()
        )
    return splits
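# A minimal usage sketch (hedged): "graphs.pkl" is a hypothetical path, and
# make_splits, ProteinDataset and mask_generator are assumed to be importable
# from this module. Each value in the returned dict is a ProteinDataset whose
# nodes are restricted to its split via mask_func.
splits = make_splits("graphs.pkl")
train_set, val_set, test_set = splits["train"], splits["val"], splits["test"]
print({name: len(ds) for name, ds in splits.items()})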
class GraphModel(Model, ABC):
    transforms = [LayerPreprocess(GCNConv), AdjToSpTensor()]

    def get_network(self, params, n_inputs, n_outputs):
        return GCN(
            n_labels=n_outputs,
            channels=params.channels,
            n_input_channels=n_inputs,
            output_activation=self.output_activation,
            l2_reg=params.l2_loss_coefficient,
        )

    def fit_network(self, params, dataset):
        # weights_va, weights_te = (
        #     utils.mask_to_weights(mask).astype(np.float32)
        #     for mask in (dataset.mask_va, dataset.mask_te)
        # )
        weights_tr, weights_va = [
            utils.weight_by_class(dataset[0].y, mask)
            for mask in [dataset.mask_tr, dataset.mask_va]
        ]
        loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
        loader_va = SingleLoader(dataset, sample_weights=weights_va)
        history = self.network.fit(
            loader_tr.load(),
            steps_per_epoch=loader_tr.steps_per_epoch,
            validation_data=loader_va.load(),
            validation_steps=loader_va.steps_per_epoch,
            epochs=params.epochs,
            callbacks=[
                tf.keras.callbacks.EarlyStopping(
                    monitor="val_loss",
                    patience=params.patience,
                    restore_best_weights=True,
                ),
                tf.keras.callbacks.ModelCheckpoint(
                    os.path.join(params.directory, type(self).__name__ + ".h5"),
                    monitor="val_loss",
                    save_best_only=True,
                    save_weights_only=True,
                ),
            ],
        )
        return history
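# Hedged illustration: weight_by_class is a project-local helper, so this
# hypothetical stand-in only shows the assumed semantics — masked nodes get
# weights inversely proportional to their class frequency, nodes outside the
# mask get weight zero.
import numpy as np

def weight_by_class_sketch(y, mask):
    class_counts = y[mask].sum(axis=0)  # per-class counts inside the mask
    inv = np.where(class_counts > 0, 1.0 / np.maximum(class_counts, 1), 0.0)
    return (y @ inv) * mask             # zero weight outside the mask

y = np.eye(2)[[0, 0, 0, 1]]             # three nodes of class 0, one of class 1
mask = np.array([True, True, True, True])
print(weight_by_class_sketch(y, mask))  # the class-1 node is weighted 3x higher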
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.transforms import LayerPreprocess, AdjToSpTensor

tf.random.set_seed(seed=0)  # make weight initialization reproducible

# Load data
dataset = Citation('cora', transforms=[LayerPreprocess(GCNConv), AdjToSpTensor()])


# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    return mask / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (
    mask_to_weights(mask)
    for mask in (dataset.mask_tr, dataset.mask_va, dataset.mask_te)
)
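# Quick sanity check of the weighting trick: with a Boolean mask, each masked
# node gets weight 1/n and the weights sum to 1, so a "sum"-reduced loss
# weighted this way equals the mean loss over the masked nodes.
demo_mask = np.array([True, True, False, True])
print(mask_to_weights(demo_mask))        # [0.333... 0.333... 0. 0.333...]
print(mask_to_weights(demo_mask).sum())  # 1.0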
class SGCN:
    def __init__(self, K):
        self.K = K

    def __call__(self, graph):
        # Precompute the K-th power of the (preprocessed) adjacency matrix
        out = graph.a
        for _ in range(self.K - 1):
            out = out.dot(graph.a)
        out.sort_indices()
        graph.a = out
        return graph


# Load data
K = 2  # Propagation steps for SGCN
dataset = Citation("cora", transforms=[LayerPreprocess(GCNConv), SGCN(K)])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Parameters
l2_reg = 5e-6        # L2 regularization rate
learning_rate = 0.2  # Learning rate
epochs = 20000       # Number of training epochs
patience = 200       # Patience for early stopping
a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1

N = dataset.n_nodes          # Number of nodes in the graph
F = dataset.n_node_features  # Original size of node features
n_out = dataset.n_labels     # Number of classes

# Model definition
x_in = Input(shape=(F, ))
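# Hedged sketch: check on a tiny sparse matrix that SGCN(K) replaces the
# adjacency with its K-th power, i.e. K-hop propagation is baked into the
# data so the downstream model can be a single linear layer (the SGC idea).
# _Toy is a hypothetical stand-in for a Spektral Graph object.
import scipy.sparse as sp

class _Toy:
    pass

g = _Toy()
g.a = sp.csr_matrix([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])  # path graph
expected = (g.a @ g.a).toarray()  # 2-hop reachability
SGCN(2)(g)
assert (g.a.toarray() == expected).all()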
Filippo Maria Bianchi, Daniele Grattarola, Cesare Alippi, Lorenzo Livi
"""
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import ARMAConv
from spektral.transforms import LayerPreprocess

# Load data
dataset = Citation("cora", transforms=[LayerPreprocess(ARMAConv)])
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Parameters
channels = 16         # Number of channels in the first layer
iterations = 1        # Number of iterations to approximate each ARMA(1)
order = 2             # Order of the ARMA filter (number of parallel stacks)
share_weights = True  # Share weights in each ARMA stack
dropout_skip = 0.75   # Dropout rate for the internal skip connection of ARMA
dropout = 0.5         # Dropout rate for the features
l2_reg = 5e-5         # L2 regularization rate
learning_rate = 1e-2  # Learning rate
epochs = 20000        # Number of training epochs
patience = 100        # Patience for early stopping
a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1
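# Hedged sketch (not the verbatim original script): how the hyperparameters
# above typically feed an ARMAConv layer with the Keras functional API, using
# the single-graph input convention of the other examples here.
x_in = Input(shape=(dataset.n_node_features,))
a_in = Input(shape=(None,), sparse=True, dtype=a_dtype)
x_1 = Dropout(dropout)(x_in)
x_1 = ARMAConv(channels,
               iterations=iterations,
               order=order,
               share_weights=share_weights,
               dropout_rate=dropout_skip,
               activation="elu",
               kernel_regularizer=l2(l2_reg))([x_1, a_in])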
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.data.loaders import SingleLoader
from spektral.datasets.citation import Citation
from spektral.layers import ChebConv
from spektral.transforms import LayerPreprocess, AdjToSpTensor

# Load data
dataset = Citation('cora', transforms=[LayerPreprocess(ChebConv), AdjToSpTensor()])


# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    return mask / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (
    mask_to_weights(mask)
    for mask in (dataset.mask_tr, dataset.mask_va, dataset.mask_te)
)
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.datasets.citation import Cora
from spektral.layers import GATConv
from spektral.transforms import AdjToSpTensor, LayerPreprocess
from spektral.utils import tic, toc

tf.random.set_seed(0)

# Load data
dataset = Cora(normalize_x=True, transforms=[LayerPreprocess(GATConv), AdjToSpTensor()])
graph = dataset[0]
x, a, y = graph.x, graph.a, graph.y
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

l2_reg = 2.5e-4

# Define model
x_in = Input(shape=(dataset.n_node_features, ))
a_in = Input(shape=(None, ), sparse=True)
x_1 = Dropout(0.6)(x_in)
x_1 = GATConv(
    8,
    attn_heads=8,
    concat_heads=True,
    dropout_rate=0.6,
""" from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers import Dropout, Input from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.regularizers import l2 from spektral.data.loaders import SingleLoader from spektral.datasets.citation import Citation from spektral.layers import ARMAConv from spektral.transforms import AdjToSpTensor, LayerPreprocess # Load data dataset = Citation("cora", transforms=[LayerPreprocess(ARMAConv), AdjToSpTensor()]) mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te # Parameters channels = 16 # Number of channels in the first layer iterations = 1 # Number of iterations to approximate each ARMA(1) order = 2 # Order of the ARMA filter (number of parallel stacks) share_weights = True # Share weights in each ARMA stack dropout_skip = 0.75 # Dropout rate for the internal skip connection of ARMA dropout = 0.5 # Dropout rate for the features l2_reg = 5e-5 # L2 regularization rate learning_rate = 1e-2 # Learning rate epochs = 20000 # Number of training epochs patience = 100 # Patience for early stopping a_dtype = dataset[0].a.dtype # Only needed for TF 2.1
""" from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.layers import Input, Dropout from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.regularizers import l2 from spektral.data.loaders import SingleLoader from spektral.datasets.citation import Citation from spektral.layers import ChebConv from spektral.transforms import LayerPreprocess, AdjToSpTensor # Load data dataset = Citation('cora', transforms=[LayerPreprocess(ChebConv), AdjToSpTensor()]) mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te # Parameters channels = 16 # Number of channels in the first layer K = 2 # Max degree of the Chebyshev polynomials dropout = 0.5 # Dropout rate for the features l2_reg = 5e-4 / 2 # L2 regularization rate learning_rate = 1e-2 # Learning rate epochs = 200 # Number of training epochs patience = 10 # Patience for early stopping a_dtype = dataset[0].a.dtype # Only needed for TF 2.1 N = dataset.n_nodes # Number of nodes in the graph F = dataset.n_node_features # Original size of node features n_out = dataset.n_labels # Number of classes
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.datasets.citation import Cora
from spektral.layers import GATConv
from spektral.transforms import LayerPreprocess, AdjToSpTensor
from spektral.utils import tic, toc

# Load data
dataset = Cora(transforms=[LayerPreprocess(GATConv), AdjToSpTensor()])
graph = dataset[0]
x, a, y = graph.x, graph.a, graph.y
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

# Define model
x_in = Input(shape=(dataset.n_node_features, ))
a_in = Input(shape=(None, ), sparse=True)
x_1 = Dropout(0.6)(x_in)
x_1 = GATConv(
    8,
    attn_heads=8,
    concat_heads=True,
    dropout_rate=0.6,
    activation='elu',
    kernel_regularizer=l2(5e-4),
    attn_kernel_regularizer=l2(5e-4),
import numpy as np
import tensorflow as tf

from spektral.datasets.citation import Citation
from spektral.layers import GCNConv
from spektral.models import GNNExplainer
from spektral.models.gcn import GCN
from spektral.transforms import AdjToSpTensor, LayerPreprocess
from spektral.utils import gcn_filter

# Config
learning_rate = 1e-2
seed = 0
epochs = 50
patience = 10
data = "cora"

tf.random.set_seed(seed=seed)  # make weight initialization reproducible

# Load data
dataset = Citation(data, normalize_x=True, transforms=[LayerPreprocess(GCNConv)])


# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following original implementation by
# Kipf & Welling)
def mask_to_weights(mask):
    return mask.astype(np.float32) / np.count_nonzero(mask)


weights_tr, weights_va, weights_te = (
    mask_to_weights(mask)
    for mask in (dataset.mask_tr, dataset.mask_va, dataset.mask_te)
)

model = GCN(n_labels=dataset.n_labels)
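# Hedged sketch of the next steps (the GNNExplainer argument names below are
# assumptions and may differ across Spektral versions): train the GCN on the
# single graph, then ask the explainer which subgraph and features drive one
# node's prediction.
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from spektral.data.loaders import SingleLoader

model.compile(
    optimizer=Adam(learning_rate),
    loss=CategoricalCrossentropy(reduction="sum"),
    weighted_metrics=["acc"],
)
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
model.fit(loader_tr.load(), steps_per_epoch=loader_tr.steps_per_epoch, epochs=epochs)

graph = dataset[0]
explainer = GNNExplainer(model, graph_level=False)  # assumed constructor args
adj_mask, feat_mask = explainer.explain_node(x=graph.x, a=graph.a, node_idx=0)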