def test_qm7():
    """Smoke-test the QM7 dataset: one batch from each loader type must be producible."""
    dataset = datasets.QM7()
    # Use the built-in next() instead of calling the dunder __next__() directly.
    dl = DisjointLoader(dataset, batch_size=batch_size)
    next(dl)
    bl = BatchLoader(dataset, batch_size=batch_size)
    next(bl)
def test_qm9():
    """Smoke-test the QM9 dataset (truncated to 100 graphs for speed)."""
    dataset = datasets.QM9(amount=100)
    # Use the built-in next() instead of calling the dunder __next__() directly.
    dl = DisjointLoader(dataset, batch_size=batch_size)
    next(dl)
    bl = BatchLoader(dataset, batch_size=batch_size)
    next(bl)
def test_batch():
    """Verify BatchLoader output shapes, step count, and tf_signature on TestDataset."""
    dataset = TestDataset()
    loader = BatchLoader(dataset, batch_size=batch_size, epochs=1, shuffle=False)
    # epochs=1 makes the loader finite; unpack to keep only the final batch.
    *_, last_batch = loader
    (x, a, e), y = last_batch
    # Batches are zero-padded up to the largest graph in the batch.
    n_max = max(ns[-graphs_in_batch:])
    assert x.shape == (graphs_in_batch, n_max, f)
    assert a.shape == (graphs_in_batch, n_max, n_max)
    assert e.shape == (graphs_in_batch, n_max, n_max, s)
    assert y.shape == (graphs_in_batch, 2)
    assert loader.steps_per_epoch == np.ceil(len(dataset) / batch_size)
    signature = loader.tf_signature()
    assert len(signature[0]) == 3
def test_batch():
    """Verify BatchLoader output shapes on TestDataset."""
    data = TestDataset()
    loader = BatchLoader(data, batch_size=batch_size, epochs=1, shuffle=False)
    # list() is the idiomatic way to materialize an iterable (was: [b for b in loader]).
    batches = list(loader)
    (x, a, e), y = batches[-1]
    # Batches are zero-padded up to the largest graph in the batch.
    n = max(ns[-graphs_in_batch:])
    assert x.shape == (graphs_in_batch, n, f)
    assert a.shape == (graphs_in_batch, n, n)
    assert e.shape == (graphs_in_batch, n, n, s)
    assert y.shape == (graphs_in_batch, 2)
def test_tud():
    """Smoke-test TUDataset loading for both edge-attributed and node-attributed datasets."""
    # Edge labels + edge attributes
    dataset = datasets.TUDataset('BZR_MD', clean=False)
    # Use the built-in next() instead of calling the dunder __next__() directly.
    dl = DisjointLoader(dataset, batch_size=batch_size)
    next(dl)
    bl = BatchLoader(dataset, batch_size=batch_size)
    next(bl)

    # Node labels + node attributes + clean version
    dataset = datasets.TUDataset('ENZYMES', clean=True)
    dl = DisjointLoader(dataset, batch_size=batch_size)
    next(dl)
    bl = BatchLoader(dataset, batch_size=batch_size)
    next(bl)
################################################################################
# BUILD MODEL
################################################################################
# Functional model: two edge-conditioned convolutions followed by a global
# sum-pool readout and a dense output layer.
X_in = Input(shape=(None, F))
A_in = Input(shape=(None, None))
E_in = Input(shape=(None, None, S))

X_1 = ECCConv(32, activation="relu")([X_in, A_in, E_in])
X_2 = ECCConv(32, activation="relu")([X_1, A_in, E_in])
X_3 = GlobalSumPool()(X_2)
output = Dense(n_out)(X_3)

# Build model
model = Model(inputs=[X_in, A_in, E_in], outputs=output)
# `lr` is deprecated (and removed in newer Keras); use `learning_rate` instead.
optimizer = Adam(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss="mse")
model.summary()

################################################################################
# FIT MODEL
################################################################################
loader_tr = BatchLoader(dataset_tr, batch_size=batch_size)
model.fit(loader_tr.load(), steps_per_epoch=loader_tr.steps_per_epoch, epochs=epochs)

################################################################################
# EVALUATE MODEL
################################################################################
print("Testing model")
loader_te = BatchLoader(dataset_te, batch_size=batch_size)
model_loss = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done. Test loss: {}".format(model_loss))
# Parameters N = max(g.n_nodes for g in dataset) F = dataset.n_node_features # Dimension of node features S = dataset.n_edge_features # Dimension of edge features n_out = dataset.n_labels # Dimension of the target # Train/test split idxs = np.random.permutation(len(dataset)) split_va, split_te = int(0.8 * len(dataset)), int(0.9 * len(dataset)) idx_tr, idx_va, idx_te = np.split(idxs, [split_va, split_te]) dataset_tr = dataset[idx_tr] dataset_va = dataset[idx_va] dataset_te = dataset[idx_te] loader_tr = BatchLoader(dataset_tr, batch_size=batch_size, mask=True) loader_va = BatchLoader(dataset_va, batch_size=batch_size, mask=True) loader_te = BatchLoader(dataset_te, batch_size=batch_size, mask=True) ################################################################################ # Build model ################################################################################ class Net(Model): def __init__(self): super().__init__() self.mask = GraphMasking() self.conv1 = GCSConv(32, activation="relu") self.pool = MinCutPool(N // 2) self.conv2 = GCSConv(32, activation="relu") self.global_pool = GlobalSumPool()
# Functional model: GCN conv -> MinCut pooling -> GCN conv -> global sum-pool
# readout -> dense output.
X_1 = GCNConv(32, activation='relu')([X_in, A_in])
X_1, A_1 = MinCutPool(N // 2)([X_1, A_in])
X_2 = GCNConv(32, activation='relu')([X_1, A_1])
X_3 = GlobalSumPool()(X_2)
output = Dense(n_out)(X_3)

# Build model
model = Model(inputs=[X_in, A_in], outputs=output)
# `lr` is deprecated (and removed in newer Keras); use `learning_rate` instead.
opt = Adam(learning_rate=learning_rate)
model.compile(optimizer=opt, loss='mse')
model.summary()

################################################################################
# FIT MODEL
################################################################################
loader_tr = BatchLoader(dataset_tr, batch_size=batch_size)
loader_va = BatchLoader(dataset_va, batch_size=batch_size)
model.fit(
    loader_tr,
    steps_per_epoch=loader_tr.steps_per_epoch,
    epochs=epochs,
    validation_data=loader_va,
    validation_steps=loader_va.steps_per_epoch,
    callbacks=[EarlyStopping(patience=10, restore_best_weights=True)],
)

################################################################################
# EVALUATE MODEL
################################################################################
print('Testing model')
evaluator = Evaluator(name=dataset_name)
loader_te = BatchLoader(dataset_te, batch_size=batch_size, epochs=1)
# Keras forbids `batch_size` when the input already yields batches (a loader);
# pass `steps` instead so predict consumes exactly one epoch of data.
y_pred = model.predict(loader_te, steps=loader_te.steps_per_epoch)