Example #1
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam

from spektral.data import DisjointLoader
from spektral.datasets import TUDataset
from spektral.models import GeneralGNN

physical_devices = tf.config.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

# Best config
batch_size = 32
learning_rate = 0.01
epochs = 400

# Read data
data = TUDataset('PROTEINS')

# Train/test split
np.random.shuffle(data)
split = int(0.8 * len(data))
data_tr, data_te = data[:split], data[split:]

# Data loader
loader_tr = DisjointLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(data_te, batch_size=batch_size)

# Create model
model = GeneralGNN(data.n_labels, activation='softmax')
optimizer = Adam(learning_rate)
model.compile(optimizer, 'categorical_crossentropy', metrics=['categorical_accuracy'])
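
The example stops after compilation. A minimal continuation (a sketch, not part of the original snippet) would train from the loader's generator, using the steps_per_epoch attribute that Spektral loaders expose:

# Sketch: train and evaluate with the loaders defined above
model.fit(
    loader_tr.load(),
    steps_per_epoch=loader_tr.steps_per_epoch,
    epochs=epochs,
)
loss, acc = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print('Test loss: {:.4f}. Test acc: {:.4f}'.format(loss, acc))

Since loader_tr was created with epochs=epochs, its generator yields exactly enough batches for this fit call.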
Example #2

import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.optimizers import Adam

from spektral.data import DisjointLoader
from spektral.datasets import TUDataset
from spektral.models import GeneralGNN, GNNExplainer

# Config
learning_rate = 1e-2
batch_size = 32
epochs = 100

# Load data
data = TUDataset("PROTEINS")

# Train/test split
np.random.shuffle(data)
split = int(0.8 * len(data))
data_tr, data_te = data[:split], data[split:]

# Data loaders
loader_tr = DisjointLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(data_te, batch_size=batch_size)

# Create model
model = GeneralGNN(data.n_labels, activation="softmax")
optimizer = Adam(learning_rate)
loss_fn = CategoricalCrossentropy()
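
The optimizer and loss are built separately here, which points at a custom training loop rather than model.compile/fit. A sketch of one training step under that assumption (categorical_accuracy is the metric imported above; the batch unpacking follows DisjointLoader's (inputs, target) output):

@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)  # Forward pass
        loss = loss_fn(target, predictions) + sum(model.losses)  # Include regularization losses
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    acc = tf.reduce_mean(categorical_accuracy(target, predictions))
    return loss, acc

for batch in loader_tr:
    loss, acc = train_step(*batch)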
Example #3
import numpy as np

from spektral.data import BatchLoader
from spektral.datasets import TUDataset
from spektral.layers import GCSConv, GlobalSumPool, GraphMasking, MinCutPool

################################################################################
# Config
################################################################################
learning_rate = 1e-3  # Learning rate
epochs = 10  # Number of training epochs
batch_size = 32  # Batch size

################################################################################
# Load data
################################################################################
dataset = TUDataset("PROTEINS", clean=True)

# Parameters
N = max(g.n_nodes for g in dataset)  # Maximum number of nodes in a graph
F = dataset.n_node_features  # Dimension of node features
S = dataset.n_edge_features  # Dimension of edge features
n_out = dataset.n_labels  # Dimension of the target

# Train/test split
idxs = np.random.permutation(len(dataset))
split_va, split_te = int(0.8 * len(dataset)), int(0.9 * len(dataset))
idx_tr, idx_va, idx_te = np.split(idxs, [split_va, split_te])
dataset_tr = dataset[idx_tr]
dataset_va = dataset[idx_va]
dataset_te = dataset[idx_te]
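
The snippet ends after the split, but the imports indicate a batch-mode model with zero-padding masks and MinCut pooling. One way the pieces could fit together (a sketch; layer widths and the pooling ratio are illustrative, not from the original):

from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model

class Net(Model):
    def __init__(self):
        super().__init__()
        self.mask = GraphMasking()                   # Strips the padding mask from the features
        self.conv1 = GCSConv(32, activation="relu")
        self.pool = MinCutPool(N // 2)               # Cluster nodes down to N // 2
        self.conv2 = GCSConv(32, activation="relu")
        self.global_pool = GlobalSumPool()
        self.dense = Dense(n_out, activation="softmax")

    def call(self, inputs):
        x, a = inputs
        x = self.mask(x)
        x = self.conv1([x, a])
        x_pool, a_pool = self.pool([x, a])
        x_pool = self.conv2([x_pool, a_pool])
        return self.dense(self.global_pool(x_pool))

loader_tr = BatchLoader(dataset_tr, batch_size=batch_size, mask=True)

With mask=True, BatchLoader appends the binary padding mask to the node features, which GraphMasking then removes and propagates as a Keras mask.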
Example #4
import sys

import numpy as np

from spektral.datasets import TUDataset

dataset = sys.argv[1]

if (dataset[:2] == 'vs') or (dataset == 'all'):
    data_type = 'MNIST'
else:
    data_type = 'GRAPHS'

use_spektral = True

if data_type == 'GRAPHS':

    # Note: `path` (a base output directory) is defined earlier in the
    # original script and is not shown in this snippet.
    path = path + 'data/graphs/' + dataset + '/mat/'

    if use_spektral:

        data = TUDataset(dataset)
        for i in range(len(data)):
            stfile = open(path + 'SPK' + str(i) + '_st.txt', 'w')
            A = np.array(data[i].a.todense())
            num_vertices = A.shape[0]
            for v in range(num_vertices):
                stfile.write(str(v) + ' \n')
            idxs = np.argwhere(A > 0)
            for v in range(len(idxs)):
                stfile.write(str(idxs[v, 0]) + ' ' + str(idxs[v, 1]) + ' \n')
            stfile.close()

    else:

        pad_size = 50
        Cinit = np.ones([1, pad_size])
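
For reference, the writer above emits one line per vertex id followed by one line per edge. A hypothetical helper (not in the original) that reads such a *_st.txt file back into a dense adjacency matrix:

def read_st_file(fname):
    """Rebuild a dense adjacency matrix from a SPK*_st.txt file (hypothetical helper)."""
    vertices, edges = [], []
    with open(fname) as f:
        for line in f:
            parts = line.split()
            if len(parts) == 1:        # vertex line: "v"
                vertices.append(int(parts[0]))
            elif len(parts) == 2:      # edge line: "i j"
                edges.append((int(parts[0]), int(parts[1])))
    A = np.zeros((len(vertices), len(vertices)), dtype=int)
    for i, j in edges:
        A[i, j] = 1
    return A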
Example #5
import numpy as np

from spektral.data import BatchLoader
from spektral.datasets import TUDataset
from spektral.layers import GCNConv, MinCutPool, GlobalSumPool

################################################################################
# PARAMETERS
################################################################################
learning_rate = 1e-3  # Learning rate
epochs = 10  # Number of training epochs
batch_size = 32  # Batch size

################################################################################
# LOAD DATA
################################################################################
dataset = TUDataset('PROTEINS', clean=True)

# Parameters
N = max(g.n_nodes for g in dataset)  # Maximum number of nodes in a graph
F = dataset.n_node_features  # Dimension of node features
S = dataset.n_edge_features  # Dimension of edge features
n_out = dataset.n_labels  # Dimension of the target

# Train/test split
idxs = np.random.permutation(len(dataset))
split_va, split_te = int(0.8 * len(dataset)), int(0.9 * len(dataset))
idx_tr, idx_va, idx_te = np.split(idxs, [split_va, split_te])
dataset_tr = dataset[idx_tr]
dataset_va = dataset[idx_va]
dataset_te = dataset[idx_te]
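
This variant stops at the same point as Example #3. Assuming a compiled Keras model built from the imported layers, the batch-mode loaders and training call would follow the usual pattern (a sketch, not from the original):

# Sketch: batch-mode loaders and training, assuming a compiled `model`
loader_tr = BatchLoader(dataset_tr, batch_size=batch_size)
loader_va = BatchLoader(dataset_va, batch_size=batch_size)
loader_te = BatchLoader(dataset_te, batch_size=batch_size)

model.fit(
    loader_tr.load(),
    steps_per_epoch=loader_tr.steps_per_epoch,
    validation_data=loader_va.load(),
    validation_steps=loader_va.steps_per_epoch,
    epochs=epochs,
)
model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)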