Example #1
    def test_save_load(self):
        """
        Test if saving and loading the model in a new object gives the same results
        """
        filename = os.getcwd() + "/data/test_save_load"
        graph = gb.create_directed_barbell(4, 4)
        gae = GraphAutoEncoder(graph,
                               learning_rate=0.01,
                               support_size=[5, 5],
                               dims=[3, 5, 7, 6, 2],
                               batch_size=12,
                               max_total_steps=50,
                               verbose=True)
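        # train the model, compute the node embeddings and save the trained model to disk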
        gae.fit(graph)
        embed = gae.calculate_embeddings()
        gae.save_model(filename)

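        # create a fresh instance, load the saved model and recompute the embeddings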
        gae2 = GraphAutoEncoder(graph,
                                learning_rate=0.01,
                                support_size=[5, 5],
                                dims=[3, 5, 7, 6, 2],
                                batch_size=12,
                                max_total_steps=50,
                                verbose=True)
        gae2.load_model(filename, graph)
        embed2 = gae2.calculate_embeddings()

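        # the two embedding matrices should match element-wise (sum of differences ~ 0)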
        embed3 = np.subtract(embed, embed2)
        self.assertAlmostEqual(
            np.sum(embed3), 0, 4,
            "loaded model gives different result then original")
Example #2
    def test_fit(self):
        """
        Test if fit function results in the same results as when trained separately
        """
        graph = gb.create_directed_barbell(4, 4)
        gae = GraphAutoEncoder(graph,
                               learning_rate=0.01,
                               support_size=[5, 5],
                               dims=[3, 5, 7, 6, 2],
                               batch_size=12,
                               max_total_steps=50,
                               verbose=True)

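        # train each layer individually, then fine-tune all layers jointly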
        train_res = {}
        for i in range(len(gae.dims)):
            train_res["l" + str(i + 1)] = gae.train_layer(i + 1)

        train_res['all'] = gae.train_layer(len(gae.dims),
                                           all_layers=True,
                                           dropout=None)
        embed = gae.calculate_embeddings()

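        # train a second model end-to-end via fit() and compare the resulting embeddings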
        gae2 = GraphAutoEncoder(graph,
                                learning_rate=0.01,
                                support_size=[5, 5],
                                dims=[3, 5, 7, 6, 2],
                                batch_size=12,
                                max_total_steps=50,
                                verbose=True)
        gae2.fit(graph)
        embed2 = gae2.calculate_embeddings()
        embed3 = np.subtract(embed, embed2)
        self.assertAlmostEqual(
            np.sum(embed3), 0, 4,
            "fit method results in a different model when trained separately")
Example #3
    def gs_graphcase(self, G, dim_size):
        gs_res = {}
        dims = self.get_dims(dim_size)

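        # grid search over learning rate, dropout rate and activation function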
        for lr in AmlSimPreprocessor.learning_rates:
            for do in AmlSimPreprocessor.dropout_rates:
                for act in AmlSimPreprocessor.act_functions:
                    gae = GraphAutoEncoder(
                        G,
                        support_size=AmlSimPreprocessor.support_size,
                        dims=dims,
                        batch_size=AmlSimPreprocessor.batch_size,
                        hub0_feature_with_neighb_dim=AmlSimPreprocessor.hub0_feature_with_neighb_dim,
                        useBN=AmlSimPreprocessor.useBN,
                        verbose=True,
                        seed=1,
                        learning_rate=lr,
                        act=act,
                        dropout=do)
                    train_res = gae.fit(epochs=AmlSimPreprocessor.epochs,
                                        layer_wise=False)

                    # save results
                    act_str = 'tanh' if act == tf.nn.tanh else 'sigm'
                    run_id = f'dim_{dim_size}_lr_{lr}_do_{do}_act_{act_str}_layers_{self.layers}'
                    pickle.dump(train_res[None].history,
                                open(self.out_dir + 'res_' + run_id, "wb"))
                    gae.save_weights(self.out_dir + 'mdl_' + run_id)

                    # print and store result
                    val_los = sum(train_res[None].history['val_loss'][-2:]) / 2
                    gs_res[run_id] = val_los
                    print(
                        f'dims:{dim_size}, lr:{lr}, dropout lvl:{do}, act func:{act_str} resulting val loss {val_los}'
                    )

        # print all results, save and return best model
        for k, v in gs_res.items():
            print(f'run: {k} with result {v}')
        pickle.dump(
            gs_res,
            open(self.out_dir + f'graphcase_gs_results_dim_{dim_size}', "wb"))
        return min(gs_res, key=gs_res.get)  # best run = lowest validation loss
Example #4
import example_graph_bell as gb
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import pickle
import random
# NOTE: import path for GraphAutoEncoder assumed from the GraphCase repository layout
from GAE.graph_case_controller import GraphAutoEncoder
#%%
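# build a directed barbell graph and give every node two attributes:
# a deterministic integer label and a uniformly random one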
graph = gb.create_directed_barbell(10, 10)
random.seed(2)
for u in graph.nodes(data=True):
    u[1]['label1'] = int(u[0])
    u[1]['label2'] = random.uniform(0.0, 1.0)
gae = GraphAutoEncoder(graph, learning_rate=0.01, support_size=[5, 5], dims=[3, 5, 7, 6, 2],
                       batch_size=12, max_total_steps=10, verbose=True, useBN=True)
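# train end-to-end, compute the embeddings and inspect the sampled level-1 input structure (here for node 15)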
gae.fit()
embed = gae.calculate_embeddings()
l1_struct, graph2 = gae.get_l1_structure(15, show_graph=True, node_label='feat0')

#%%

# print(l1_struct)
# train_res = {}
# for i in range(len(gae.dims)):
#     train_res["l"+str(i+1)] = gae.train_layer(i+1)

# train_res['all'] = gae.train_layer(len(gae.dims), all_layers=True, dropout=None)
# embed = gae.calculate_embeddings()
# filename = '/Users/tonpoppe/workspace/GraphCase/data/model1'
# gae.save_model(filename)