Example #1
0
def test_single():
    """Single-mode loader over a one-graph dataset must yield exactly one
    full-graph batch with node-aligned shapes and a 3-element tf signature."""
    dataset = TestDatasetSingle()
    num_nodes = dataset.n_nodes
    loader = SingleLoader(dataset, sample_weights=np.ones(num_nodes), epochs=1)

    batches = list(loader)
    assert len(batches) == 1

    inputs, labels, sample_weights = batches[0]
    node_feats, adj, edge_feats = inputs
    assert node_feats.shape == (num_nodes, f)
    assert adj.shape == (num_nodes, num_nodes)
    # Only check rank and feature dim of edge attributes; the number of
    # edges is not fixed, so we avoid counting them.
    assert len(edge_feats.shape) == 3 and edge_feats.shape[-1] == s
    assert labels.shape == (num_nodes, 2)
    assert loader.steps_per_epoch == 1

    signature = loader.tf_signature()
    assert len(signature[0]) == 3
Example #2
0
def test_single():
    """Single-mode loader over a one-graph dataset must yield exactly one
    full-graph batch whose tensors are aligned with the number of nodes."""
    data = TestDatasetSingle()
    n = data.n_nodes
    loader = SingleLoader(data, sample_weights=np.ones(n), epochs=1)
    # list(loader) is the idiomatic way to materialize an iterable
    # (instead of [b for b in loader]); it also matches the sibling test.
    batches = list(loader)
    assert len(batches) == 1

    (x, a, e), y, sw = batches[0]
    assert x.shape == (n, f)  # node features
    assert a.shape == (n, n)  # adjacency
    assert len(e.shape) == 3 and e.shape[-1] == s  # Avoid counting edges
    assert y.shape == (n, 2)  # per-node labels
Example #3
0
    def train_model(self):
        """Train on the training mask with early stopping on validation,
        then evaluate the *trained* model on the test mask.

        Returns:
            tuple: (fit history, evaluation results).
        """
        loader_tr = SingleLoader(dataset, sample_weights=mask_tr)
        loader_va = SingleLoader(dataset, sample_weights=mask_va)
        # Build the model once and keep a reference: the original code called
        # self.build_model() a second time for evaluation, which evaluated a
        # fresh, UNTRAINED model instead of the one that was just fitted.
        model = self.build_model()
        result = model.fit(
            loader_tr.load(),
            steps_per_epoch=loader_tr.steps_per_epoch,
            validation_data=loader_va.load(),
            validation_steps=loader_va.steps_per_epoch,
            epochs=epochs,
            callbacks=[
                EarlyStopping(patience=patience, restore_best_weights=True)
            ],
        )

        loader_te = SingleLoader(dataset, sample_weights=mask_te)
        eval_results = model.evaluate(
            loader_te.load(), steps=loader_te.steps_per_epoch)
        print("Done.\n"
              "Test loss: {}\n"
              "Test accuracy: {}".format(*eval_results))

        return result, eval_results
Example #4
0
               kernel_regularizer=l2(l2_reg),
               use_bias=False)([do_1, a_in])
# Second GCN layer: dropout then per-node softmax over the output classes.
do_2 = Dropout(dropout)(gc_1)
gc_2 = GCNConv(n_out, activation='softmax', use_bias=False)([do_2, a_in])

# Build model
model = Model(inputs=[x_in, a_in], outputs=gc_2)
# The `lr` keyword is deprecated (and removed in recent Keras); use
# `learning_rate`, consistent with the other examples in this file.
optimizer = Adam(learning_rate=learning_rate)
model.compile(
    optimizer=optimizer,
    loss=CategoricalCrossentropy(reduction='sum'),  # To compute mean
    weighted_metrics=['acc'])
model.summary()

# Train model: single-mode loaders feed the whole graph, with the split
# encoded via per-node sample weights.
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)
model.fit(
    loader_tr.load(),
    steps_per_epoch=loader_tr.steps_per_epoch,
    validation_data=loader_va.load(),
    validation_steps=loader_va.steps_per_epoch,
    epochs=epochs,
    callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)])

# Evaluate model
print('Evaluating model.')
loader_te = SingleLoader(dataset, sample_weights=weights_te)
eval_results = model.evaluate(loader_te.load(),
                              steps=loader_te.steps_per_epoch)
print('Done.\n' 'Test loss: {}\n' 'Test accuracy: {}'.format(*eval_results))
Example #5
0
    dropout_rate=dropout_skip,
    activation="softmax",
    gcn_activation=None,
    kernel_regularizer=l2(l2_reg),
)([gc_2, a_in])

# Build model
# NOTE(review): `outputs=gc_2` — the layer constructed just above is applied
# to [gc_2, a_in], but its result is bound to a name outside this view.
# Confirm that layer's output was not meant to be the model output; as
# written it would be unused.
model = Model(inputs=[x_in, a_in], outputs=gc_2)
optimizer = Adam(learning_rate=learning_rate)
model.compile(
    optimizer=optimizer, loss="categorical_crossentropy", weighted_metrics=["acc"]
)
model.summary()

# Train model: single-mode loaders feed the whole graph each step, with the
# train/validation split encoded via per-node sample-weight masks.
loader_tr = SingleLoader(dataset, sample_weights=mask_tr)
loader_va = SingleLoader(dataset, sample_weights=mask_va)
model.fit(
    loader_tr.load(),
    steps_per_epoch=loader_tr.steps_per_epoch,
    validation_data=loader_va.load(),
    validation_steps=loader_va.steps_per_epoch,
    epochs=epochs,
    callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)

# Evaluate model on the held-out test mask.
print("Evaluating model.")
loader_te = SingleLoader(dataset, sample_weights=mask_te)
eval_results = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Done.\n" "Test loss: {}\n" "Test accuracy: {}".format(*eval_results))
Example #6
0
    else:
        # Fallback: random filter bank — 2 filters of width `pad_size`.
        Cinit = np.random.uniform(size=[2,pad_size])
        num_filts = len(Cinit)

    graph_names = []

    if use_spektral:
        FUNC, LAB = [], []
        for i in range(len(data)):

            # Graphs with no node features get constant 10-dim features
            # (presumably so the GNN has an input; TODO confirm the width).
            if 'x' not in data[i].keys:
                data[i].x = np.ones([data[i].a.shape[0], 10])

            LAB.append(np.argmax(data[i].y))
            
            # One-graph slice + epochs=1 => the loader yields a single batch;
            # feed (x, a) through the GNN and collect its numpy output.
            loader = SingleLoader(data[i:i+1], epochs=1)
            for b in loader:
                FUNC.append(gnn([b[0][0], b[0][1]]).numpy())

            graph_names.append('SPK' + str(i))

    else:
        # Non-Spektral path: load adjacency matrices from .mat files on disk.
        list_graph_names = np.array(os.listdir(path))    
        EVA, EVE, LAB = [], [], []
        for graph in list_graph_names:
            if graph[-4:] == '.mat':
                graph_names.append(graph)
                # Filenames look like '..._gid_<k>_...'; recover the 0-based id.
                name = graph.split('_')
                gid = int(name[name.index('gid') + 1]) - 1
                A = np.array(loadmat(path + graph)['A'], dtype=np.float32)
                num_vertices = A.shape[0]
Example #7
0
# Convert the boolean split masks into per-node sample weights for the loaders.
weights_tr, weights_va, weights_te = (mask_to_weights(mask)
                                      for mask in (dataset.mask_tr,
                                                   dataset.mask_va,
                                                   dataset.mask_te))

model = GCN(n_labels=dataset.n_labels,
            n_input_channels=dataset.n_node_features)
model.compile(
    optimizer=Adam(learning_rate),
    loss=CategoricalCrossentropy(reduction="sum"),  # summed loss; weights mask nodes
    weighted_metrics=["acc"],
)

# Train model: single-mode loaders feed the whole graph each step.
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
loader_va = SingleLoader(dataset, sample_weights=weights_va)

model.fit(
    loader_tr.load(),
    steps_per_epoch=loader_tr.steps_per_epoch,
    validation_data=loader_va.load(),
    validation_steps=loader_va.steps_per_epoch,
    epochs=epochs,
    callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)],
)

# Set up explainer on the (single) graph's features and adjacency;
# gcn_filter preprocesses the adjacency the same way GCN expects.
x_exp, a_exp = dataset[0].x, dataset[0].a
explainer = GNNExplainer(model, preprocess=gcn_filter, verbose=True)