Example #1
    def __init__(self, **kwargs):
        # initialise parameters from keyword arguments
        self.g_type = kwargs.get('graph_type', 'powerlaw')
        # supported graph types: 'erdos_renyi', 'powerlaw', 'small-world', 'barabasi_albert'
        # module-level defaults, overridable through kwargs
        global NUM_MIN
        global NUM_MAX
        global MAX_ITERATION
        global combineID

        NUM_MIN = kwargs.get('NUM_MIN', NUM_MIN)
        NUM_MAX = kwargs.get('NUM_MAX', NUM_MAX)
        MAX_ITERATION = kwargs.get('MAX_ITERATION', MAX_ITERATION)
        combineID = kwargs.get('combineID', combineID)

        self.weighted = kwargs.get("weighted", False)
        self.embedding_size = EMBEDDING_SIZE
        self.learning_rate = LEARNING_RATE
        self.reg_hidden = REG_HIDDEN
        self.TrainSet = graph.py_GSet()
        self.TestSet = graph.py_GSet()
        self.utils = utils.py_Utils()
        self.TrainBetwList = []
        self.TestBetwList = []
        self.metrics = metrics.py_Metrics()
        self.inputs = dict()
        self.activation = tf.nn.leaky_relu  # alternatives: relu, selu, elu

        self.ngraph_train = 0
        self.ngraph_test = 0

        tf.compat.v1.disable_v2_behavior()
        # [node_cnt, node_feat_dim]
        self.node_feat = tf.compat.v1.placeholder(tf.float32, name="node_feat")
        # [node_cnt, aux_feat_dim]
        self.aux_feat = tf.compat.v1.placeholder(tf.float32, name="aux_feat")
        # [node_cnt, node_cnt]
        self.n2nsum_param = tf.compat.v1.sparse_placeholder(tf.float64, name="n2nsum_param")


        # [node_cnt,1]
        self.label = tf.compat.v1.placeholder(tf.float32, shape=[None,1], name="label")
        # sampled node pairs used to compute the pairwise ranking loss
        self.pair_ids_src = tf.compat.v1.placeholder(tf.int32, shape=[1,None], name='pair_ids_src')
        self.pair_ids_tgt = tf.compat.v1.placeholder(tf.int32, shape=[1,None], name='pair_ids_tgt')

        self.loss, self.trainStep, self.betw_pred, self.node_embedding, self.param_list = self.BuildNet()

        self.saver = tf.compat.v1.train.Saver(max_to_keep=None)
        config = tf.compat.v1.ConfigProto(device_count={"CPU": 8},  # limit the number of CPU devices TensorFlow may use
                                inter_op_parallelism_threads=100,
                                intra_op_parallelism_threads=100,
                                log_device_placement=False)
        config.gpu_options.allow_growth = True
        self.session = tf.compat.v1.Session(config=config)

        self.session.run(tf.compat.v1.global_variables_initializer())
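
For context, the pair_ids_src / pair_ids_tgt placeholders are typically consumed inside BuildNet to form a pairwise ranking loss over the predicted betweenness scores. The snippet below is only a minimal sketch of that idea; the helper name pairwise_ranking_loss and the exact loss formulation are assumptions, not code taken from BuildNet.

def pairwise_ranking_loss(betw_pred, labels, pair_ids_src, pair_ids_tgt):
    # betw_pred and labels have shape [node_cnt, 1]; the pair id placeholders have shape [1, num_pairs]
    src = tf.reshape(pair_ids_src, [-1])
    tgt = tf.reshape(pair_ids_tgt, [-1])
    # gather predictions and ground truth for both endpoints of every sampled pair
    pred_diff = tf.gather(betw_pred, src) - tf.gather(betw_pred, tgt)
    label_diff = tf.sigmoid(tf.gather(labels, src) - tf.gather(labels, tgt))
    # the predicted difference should preserve the ordering of the true difference
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=label_diff, logits=pred_diff))
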
Example #2
import pickle as cp
import time

import fire
import networkx as nx
import tensorflow as tf
from tensorflow.keras.models import load_model, Model
from tqdm import trange, tqdm

import metrics
from betlearn import DataGenerator, EvaluateCallback

metrics = metrics.py_Metrics()  # note: this rebinds the imported metrics module name to an instance


def evaluate_synthetic_data(data_test, model_path):
    """ This function is most probably wrong because the synthetic data is one file per graph and score """
    model = load_model(model_path, custom_objects={'tf': tf}, compile=False)
    data = DataGenerator(tag='Synthetic', include_idx_map=True, random_samples=False, compute_betweenness=False)
    evaluate = EvaluateCallback(data, prepend_str='')
    evaluate.set_model(model)

    with open(data_test, 'rb') as f:
        valid_data = cp.load(f)
    graph_list = valid_data[0]
    betweenness_list = valid_data[1]

    # load the first 100 synthetic graphs into the data generator
    for i in trange(100):
        g = graph_list[i]
        data.add_graph(g)
    data.betweenness = betweenness_list
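
The function above populates the data generator but never triggers the evaluation. Since EvaluateCallback appears to follow the standard tf.keras.callbacks.Callback interface (see Example #3), one plausible continuation is sketched below; the on_epoch_end call and the assumption that it fills the logs dictionary are guesses about betlearn's behaviour, not confirmed API.

    # hypothetical continuation of evaluate_synthetic_data:
    # run the callback once over the populated generator and return whatever it logs
    logs = {}
    evaluate.on_epoch_end(epoch=0, logs=logs)
    return logs
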
Example #3
    def __init__(self, data_generator, prepend_str: str = 'val_'):
        super().__init__()
        self.data_generator = data_generator
        self.prepend_str = prepend_str
        self.metrics = metrics.py_Metrics()
        self._supports_tf_logs = True
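
A possible usage sketch: assuming DataGenerator behaves like a Keras data sequence and the model is built and compiled elsewhere, the callback above would typically be attached to training as shown below; the tags, epoch count, and variable names are illustrative only.

train_data = DataGenerator(tag='Train')
valid_data = DataGenerator(tag='Valid')
model.fit(train_data,
          epochs=10,
          callbacks=[EvaluateCallback(valid_data)])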