Code example #1
File: main_GEM.py Project: aida-ugent/NRL4LP
def main(args):

    # Load edgelist
    G = graph_util.loadGraphFromEdgeListTxt(args.input, directed=args.directed)
    G = G.to_directed()

    # Preprocess the graph
    # G, _ = prep_graph(G)

    if args.method == 'gf':
        # GF takes embedding dimension (d), maximum iterations (max_iter), learning rate (eta),
        # regularization coefficient (regu) as inputs
        model = GraphFactorization(d=args.dimension,
                                   max_iter=args.max_iter,
                                   eta=args.eta,
                                   regu=args.regu)
    elif args.method == 'hope':
        # HOPE takes embedding dimension (d) and decay factor (beta) as inputs
        model = HOPE(d=args.dimension, beta=args.beta)
    elif args.method == 'lap':
        # LE takes embedding dimension (d) as input
        model = LaplacianEigenmaps(d=args.dimension)
    elif args.method == 'lle':
        # LLE takes embedding dimension (d) as input
        model = LocallyLinearEmbedding(d=args.dimension)
    elif args.method == 'sdne':
        encoder_layer_list = ast.literal_eval(args.encoder_list)
        # SDNE takes embedding dimension (d), observed-edge reconstruction weight (beta), first-order proximity weight
        # (alpha), lasso regularization coefficient (nu1), ridge regression coefficient (nu2), number of hidden layers
        # (K), size of each layer (n_units), number of iterations (n_iter), learning rate (xeta), batch size (n_batch),
        # and save locations for the model and weights (modelfile and weightfile) as inputs
        model = SDNE(d=args.dimension,
                     beta=args.beta,
                     alpha=args.alpha,
                     nu1=args.nu1,
                     nu2=args.nu2,
                     K=len(encoder_layer_list),
                     n_units=encoder_layer_list,
                     n_iter=args.max_iter,
                     xeta=args.learning_rate,
                     n_batch=args.bs)
        # , modelfile=['enc_model.json', 'dec_model.json'], weightfile=['enc_weights.hdf5', 'dec_weights.hdf5'])
    else:
        raise ValueError('The requested method does not exist!')

    # Learn the node embeddings
    Y, t = model.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=args.weighted,
                                 no_python=True)
    Z = np.real_if_close(Y, tol=1000)

    # Save the node embeddings to a file
    np.savetxt(args.output, Z, delimiter=',', fmt='%f')
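This excerpt omits its imports. A sketch of what they would look like, assuming the standard GEM module layout (https://github.com/palash1992/GEM); exact paths may differ per fork:

import ast

import numpy as np

from gem.utils import graph_util
from gem.embedding.gf import GraphFactorization
from gem.embedding.hope import HOPE
from gem.embedding.lap import LaplacianEigenmaps
from gem.embedding.lle import LocallyLinearEmbedding
from gem.embedding.sdne import SDNE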
Code example #2
def main():
    """Main execution steps
    Reads arguments, fixes random seeding, executes the HOPE model
    and saves resulting embeddings.
    """
    args = parse_args()

    # Set random seed if specified
    if args.seed is not None:
        np.random.seed(args.seed)
        random.seed(args.seed)
    # numpy seeds need to be in [0, 2^32-1]
    args.seed = [
        np.random.randint(4294967296 - 1) for i in range(args.num_embeddings)
    ]

    # Parse graph data
    graph_name, graph = parse_graph(args.dataset, args.largest_cc)
    graph = graph.to_directed()

    # Compute embeddings
    model = HOPE(d=args.dimensions, beta=args.beta)

    print("Num nodes: %d, num edges: %d" %
          (graph.number_of_nodes(), graph.number_of_edges()))
    times = []
    for i in range(args.num_embeddings):
        t1 = time()

        # Set the seed before learning
        np.random.seed(args.seed[i])
        random.seed(args.seed[i])

        Y, t = model.learn_embedding(graph=graph,
                                     edge_f=None,
                                     is_weighted=True,
                                     no_python=True)
        times.append(time() - t1)

        # save embedding
        file_path = (f"{args.outputdir}/hope_{graph_name}_" f"{i:03d}.emb")
        print(f"Saving embedding to {file_path}")
        save_embedding(
            Y, file_path, {
                "algorithm": "hope",
                "dimension": args.dimensions,
                "beta": args.beta,
                "seed": args.seed[i],
            })

    print(model._method_name + "\n\tAverage training time: %f" %
          (sum(times) / len(times)))
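Here save_embedding and parse_graph are project helpers not shown in the excerpt. A minimal save_embedding sketch, assuming a plain-text format with a '#'-prefixed JSON metadata line followed by one vector per row (the real helper may differ):

import json
import numpy as np

def save_embedding(Y, file_path, meta):
    # Hypothetical helper: the project's actual file format is not shown in the excerpt.
    with open(file_path, "w") as f:
        f.write("# " + json.dumps(meta) + "\n")
        np.savetxt(f, Y, fmt="%.8f")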
Code example #3
File: hope.py Project: alanyuchenhou/GEM
def main(data_set_name):
    dimensions = 4
    input_file = './graph/' + data_set_name + '.tsv'
    output_file = './emb/' + data_set_name + '.emb'
    # Instantiate the embedding method with its hyperparameters (d, beta)
    embedding = HOPE(dimensions, 1)

    # Load graph
    graph = graph_util.loadGraphFromEdgeListTxt(input_file)

    # Learn embedding - accepts a networkx graph or file with edge list
    embeddings_array, t = embedding.learn_embedding(graph,
                                                    edge_f=None,
                                                    is_weighted=True,
                                                    no_python=True)

    embeddings = pandas.DataFrame(embeddings_array)
    embeddings.to_csv(output_file, sep=' ', na_rep=0.1)
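As above, the excerpt omits its imports; a sketch of them plus a hypothetical invocation, assuming an edge list at ./graph/karate.tsv and an existing ./emb/ directory:

import pandas
from gem.utils import graph_util
from gem.embedding.hope import HOPE

main('karate')  # hypothetical dataset name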
Code example #4
File: Gecko.py Project: NewKnowledge/gecko
    def __init__(self, dim=4, models=None):
        # Initialize the set of possible models
        # see "Graph Embedding Techniques, Applications, and Performance: A Survey" by
        # Goyal and Ferrara (2017) for a taxonomy of graph embedding methods

        if not models:  # if no models specified, create some default ones
            models = []  # avoid a mutable default argument, which Python would share across instances
            # Presently all methods are "factorization based methods"
            # first method very expensive, unless C++ version installed
            # models.append(GraphFactorization(d=2, max_iter=100000, eta=1*10**-4, regu=1.0))
            models.append(HOPE(d=dim, beta=0.01))
            models.append(LaplacianEigenmaps(d=dim))
            models.append(LocallyLinearEmbedding(d=dim))
            # The following "random walk based" and "deep learning based" methods will be enabled in the future
            # models.append(node2vec(d=2, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1))
            # models.append(SDNE(d=2, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3,n_units=[50, 15,], rho=0.3, n_iter=50, xeta=0.01,n_batch=500,
            #                modelfile=['./intermediate/enc_model.json', './intermediate/dec_model.json'],
            #                weightfile=['./intermediate/enc_weights.hdf5', './intermediate/dec_weights.hdf5']))
        self.models = models
Code example #5
    def _get_embeddings(self, embedding_space):

        # You can comment out the methods you don't want to run
        models = list()
        for embed_method in self.embeddings:
##            if embed_method == EMEDDINGS.GRAPH_FACTORIZATIONE_MBEDDINGS:
##                models.append(GraphFactorization(embedding_space, 100000, 1 * 10 ** -4, 1.0))
            if embed_method == EMEDDINGS.LAPLACIAN_EIGENMAPS_EMBEDDINGS:
                models.append(LaplacianEigenmaps(embedding_space))
            if embed_method == EMEDDINGS.LOCALLY_LINEAR_EMBEDDING:
                models.append(LocallyLinearEmbedding(embedding_space))
            if embed_method == EMEDDINGS.HOPE_EMBEDDING:
                models.append(HOPE(2 + 1, 0.01))
            if embed_method == EMEDDINGS.NODE2VEC_EMBEDDING_EMBEDDINGS:
                models.append(node2vec(2, 1, 80, 10, 10, 1, 1))
            # Embeddings I was unable to get working yet - it seems that HOPE converts k to k+1 for some reason....
            # if embed_method == EMEDDINGS.SDNE_EMBEDDING_EMBEDDINGS:
            #     models.append(SDNE(d=2, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3,n_units=[50, 15,], rho=0.3, n_iter=50, xeta=0.01,n_batch=500,
            #                modelfile=[base_path + '/intermediate/enc_model.json', base_path + '/intermediate/dec_model.json'],
            #                weightfile=[base_path + '/intermediate/enc_weights.hdf5', base_path + '/intermediate/dec_weights.hdf5']))
        return models
Code example #6
File: create_emb.py Project: louzounlab/ad-rnn
    def emb(self):
        try:
            # If the embedding file already exists, skip recomputation
            open("emb_" + self.name + ".pkl").close()
            return

        except FileNotFoundError:
            res = []

            for key in self.keys:
                g = self.graph[key]

                s = nx.from_dict_of_dicts(g._adj)
                if self.type_emb == "hope":
                    embedding = HOPE(d=self.r_d, beta=0.01)
                else:
                    print("Wrong type!!!")
                    return
                Y, t = embedding.learn_embedding(graph=s,
                                                 edge_f=None,
                                                 is_weighted=True,
                                                 no_python=True)
                Y = np.asmatrix(Y)
                x = Y.T
                f = x * Y  # (d, d) Gram matrix of the embedding (second moment)
                x = f
                if self.moment == 3:
                    x = f * f  # third moment: square of the Gram matrix

                x = np.asarray(x)
                x = x.reshape(x.size, 1)  # flatten to a single column feature vector
                res.append(x)
            pickle.dump(res, open("emb_" + str(self.name) + ".pkl", "wb"))


# emb=Create_emb("/home/polina/PycharmProjects/Multi_graphs/enron400_graph.pkl","try")
# emb.emb()
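The core of emb() above summarizes each learned embedding by its Gram matrix (or the square of it for the third moment) flattened into a column vector. A standalone sketch of that computation with plain NumPy arrays, assuming an (n, d) embedding matrix:

import numpy as np

Y = np.random.rand(100, 4)         # stand-in for a learned (n, d) embedding
f = Y.T @ Y                        # (d, d) Gram matrix: second-moment summary
feature = (f @ f).reshape(-1, 1)   # third-moment variant, flattened to a column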
Code example #7
uni_name = uni_model_df.unis[task_id]

# Before running the embedding, check whether the output file has already been completed
completed_file_path = scratch_folder + "/" + use_model_type + "_" + uni_name + ".csv"

# load path of the university graph
load_path = file_folder + "/" + uni_name + ".graphml"
# save path of the embedded data
save_path = project_folder + "/" + use_model_type + "_" + uni_name + ".csv"

# create an empty list of models
models = []
# Using an if/elif statement, load the model for this task.
# The end result is a list of length 1.
if use_model_type == "HOPE":
    models.append(HOPE(d=dims * 2, beta=0.01))
elif use_model_type == "LapEig":
    models.append(LaplacianEigenmaps(d=dims))
elif use_model_type == "LLE":
    models.append(LocallyLinearEmbedding(d=dims))
elif use_model_type == "node2vec":
    models.append(
        node2vec(d=2,
                 max_iter=1,
                 walk_len=80,
                 num_walks=10,
                 con_size=10,
                 ret_p=1,
                 inout_p=1))
else:
    # This logically has to be SDNE as there are no other options
Code example #8
    def learn_embeddings(self):
        hope = HOPE(d=self.dim, beta=0.01)
        hope.learn_embedding(self.graph)

        self.embeddings = hope.get_embedding()
Code example #9
File: test.py Project: vitamins/GEM
from time import time

from gem.utils import graph_util
from gem.embedding.gf import GraphFactorization
from gem.embedding.hope import HOPE
from gem.embedding.lap import LaplacianEigenmaps
from gem.embedding.lle import LocallyLinearEmbedding
from gem.embedding.sdne import SDNE

# File that contains the edges. Format: source target
# Optionally, you can add weights as third column: source target weight
edge_f = './gem/data/karate.edgelist'
# Specify whether the edges are directed
isDirected = True

# Load graph
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=isDirected)
G = G.to_directed()

models = []
# You can comment out the methods you don't want to run
models.append(GraphFactorization(d=2, max_iter=50000, eta=1 * 10**-4, regu=1.0))
models.append(HOPE(4, 0.01))
models.append(LaplacianEigenmaps(2))
models.append(LocallyLinearEmbedding(2))
#models.append(node2vec(2, 1, 80, 10, 10, 1, 1))
#models.append(SDNE(d=2, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3,n_units=[50, 15,], rho=0.3, n_iter=50, xeta=0.01,n_batch=500,
#                modelfile=['./intermediate/enc_model.json', './intermediate/dec_model.json'],
#                weightfile=['./intermediate/enc_weights.hdf5', './intermediate/dec_weights.hdf5']))

for embedding in models:
    print('Num nodes: %d, num edges: %d' %
          (G.number_of_nodes(), G.number_of_edges()))
    t1 = time()
    # Learn embedding - accepts a networkx graph or file with edge list
    Y, t = embedding.learn_embedding(graph=G,
                                     edge_f=None,
                                     is_weighted=True,
Code example #10
    def __init__(self, dims, beta):
        super().__init__()
        self.dims = dims
        self.beta = beta
        self.embedding_model = HOPE(d=dims, beta=beta)
Code example #11
try:
    node_colors = pickle.load(
        open('data/sbm_node_labels.pickle', 'rb')
    )
except UnicodeDecodeError:
    node_colors = pickle.load(
        open('data/sbm_node_labels.pickle', 'rb'), encoding='latin1'
    )
node_colors_arr = [None] * node_colors.shape[0]
for idx in range(node_colors.shape[0]):
    node_colors_arr[idx] = np.where(node_colors[idx, :].toarray() == 1)[1][0]

models = []
# Load the models you want to run
models.append(GraphFactorization(d=128, max_iter=1000, eta=1 * 10**-4, regu=1.0, data_set='sbm'))
models.append(HOPE(d=256, beta=0.01))
models.append(LaplacianEigenmaps(d=128))
models.append(LocallyLinearEmbedding(d=128))
models.append(node2vec(d=182, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1, data_set='sbm'))
models.append(SDNE(d=128, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3, n_units=[500, 300], rho=0.3, n_iter=30, xeta=0.001, n_batch=500,
                modelfile=['enc_model.json', 'dec_model.json'],
                weightfile=['enc_weights.hdf5', 'dec_weights.hdf5']))
# For each model, learn the embedding and evaluate on graph reconstruction and visualization
for embedding in models:
    print('Num nodes: %d, num edges: %d' % (G.number_of_nodes(), G.number_of_edges()))
    t1 = time()
    # Learn embedding - accepts a networkx graph or file with edge list
    Y, t = embedding.learn_embedding(graph=G, edge_f=None, is_weighted=True, no_python=True)
    print(embedding._method_name + ':\n\tTraining time: %f' % (time() - t1))
    # Evaluate on graph reconstruction
    MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(G, embedding, Y, None)
Code example #12
File: test_karate.py Project: shiyy123/GEMModify
            edge_f = os.path.join(edge_file_dir, tmp)

            # if edge_f.__contains__("71/2403/8963027.edgelist") or \
            #         edge_f.__contains__("71/2281/8951281.edgelist") or \
            #         edge_f.__contains__("71/2281/8951162.edgelist"):
            #     continue

            # Specify whether the edges are directed
            isDirected = True

            # Load graph
            G = graph_util.loadGraphFromEdgeListTxt(edge_f,
                                                    directed=isDirected)
            G = G.to_directed()

            models = [HOPE(d=4, beta=0.01)]
            # Load the models you want to run
            # models.append(GraphFactorization(d=2, max_iter=50000, eta=1 * 10 ** -4, regu=1.0))
            # models.append(LaplacianEigenmaps(d=2))
            # models.append(LocallyLinearEmbedding(d=2))
            # models.append(node2vec(d=2, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1))
            # models.append(SDNE(d=2, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3, n_units=[50, 15, ], rho=0.3, n_iter=50, xeta=0.01,
            #                    n_batch=100,
            #                    modelfile=['enc_model.json', 'dec_model.json'],
            #                    weightfile=['enc_weights.hdf5', 'dec_weights.hdf5']))

            # For each model, learn the embedding and evaluate on graph reconstruction and visualization

            for embedding in models:
                # print ('Num nodes: %d, num edges: %d' % (G.number_of_nodes(), G.number_of_edges()))
                t1 = time()
Code example #13
def plot_embed_graph(subreddit_title,
                     edges,
                     positions=None,
                     node_labels=None,
                     node_colors=None,
                     edge_colors=None,
                     with_labels=True):
    plot_HOPE = True
    plot_LE = False
    plot_LLE = False
    # Construct Graph or DiGraph from data collected.
    G = nx.DiGraph()
    print("Adding edges...")
    for edge in edges:
        G.add_edge(edge[0], edge[1])

    models = []
    if plot_HOPE:
        # HOPE takes embedding dimension (d) and decay factor (beta) as inputs
        models.append(HOPE(d=4, beta=0.01))
    if plot_LE:
        # LE takes embedding dimension (d) as input
        models.append(LaplacianEigenmaps(d=2))
    if plot_LLE:
        # LLE takes embedding dimension (d) as input
        models.append(LocallyLinearEmbedding(d=2))

    model_count = 0
    graph_out = None
    for embedding in models:
        model_count += 1
        plt.figure(model_count)
        print('Num nodes: %d, num edges: %d' %
              (G.number_of_nodes(), G.number_of_edges()))
        skip_training = False
        if not skip_training:
            t1 = time()
            # Learn embedding - accepts a networkx graph or file with edge list
            print("Now we train...")
            try:
                Y, t = embedding.learn_embedding(graph=G,
                                                 edge_f=None,
                                                 is_weighted=True,
                                                 no_python=True)
            except ValueError:
                regular_plot(subreddit_title,
                             edges,
                             positions=None,
                             node_labels=node_labels,
                             node_colors=node_colors,
                             edge_colors=edge_colors,
                             with_labels=with_labels)
                return
            print(embedding._method_name + ':\n\tTraining time: %f' %
                  (time() - t1))
            # Evaluate on graph reconstruction
            # MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(G, embedding, Y, None)
            #---------------------------------------------------------------------------------
            # print(("\tMAP: {} \t precision curve: {}\n\n\n\n"+'-'*100).format(MAP,prec_curv[:5]))
        #---------------------------------------------------------------------------------
        # Visualize
        print("Training finished... Let's visualize...")
        graph_out = regular_plot(subreddit_title,
                                 edges,
                                 positions=embedding.get_embedding(),
                                 node_labels=node_labels,
                                 node_colors=node_colors,
                                 edge_colors=edge_colors,
                                 with_labels=with_labels)
        # viz.plot_embedding2D(embedding.get_embedding(), di_graph=G, node_colors=sub_colors, labels=graph_labels)
        # plt.title("Scraping Reddit starting from "+subreddit_title)
        # plt.show()
    return graph_out
Code example #14
File: Hope.py Project: caijiangyao1991/network
"""安装GEM的包
https://github.com/palash1992/GEM
"""
#encoding='utf-8'
from gem.utils import graph_util
from gem.evaluation import evaluate_graph_reconstruction as gr
from time import time
from gem.embedding.hope import HOPE
edge_f = './data/load_rename.csv'
isDirected = True
# Load graph
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=isDirected)
G = G.to_directed()
# Run the model
print('Num nodes: %d, num edges: %d' %
      (G.number_of_nodes(), G.number_of_edges()))
t1 = time()
# Hyperparameters chosen via grid search
embedding = HOPE(d=80, beta=0.01)
# Learn embedding - accepts a networkx graph or file with edge list
Y, t = embedding.learn_embedding(graph=G, is_weighted=True)
print(Y)
# # Evaluate on graph reconstruction
# MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(G, embedding, Y, None)
Code example #15
    def __init__(self, d=2, beta=0.01):
        self.model = HOPE(d=d, beta=beta)
Code example #16
def benchmark(x, cv=5):
    """Automatically run a series of benchmarks for unsupervised learning (MAP),
    semi-supervised learning, and supervised learning (cross-validation accuracy
    with random forest classifiers) on the provided input dataset.

    # Arguments:
        x (NEGraph): A NeuroEmbed graph.
        cv (int): Optional. Number of cross-validation folds to use.

    # Returns:
        dict: A result dictionary with all models and results.
    """
    all_results = {}
    G, X, y, S, names = x.G, x.X, x.y, x.S, x.names
    out_metrics = {}
    model = ASEEmbedding()
    model.fit(X)
    MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(
        G, model, model.H, is_undirected=False, is_weighted=True)
    out_metrics['MAP'] = MAP
    d = model.H.shape[1] // 2
    out_metrics = generate_metrics(G, model, model.H, y, model.y, S, cv=cv)
    all_results['ASE'] = out_metrics
    raw_model = RawEmbedding()
    raw_model.fit(X, n_components=d)
    out_metrics = generate_metrics(G,
                                   raw_model,
                                   raw_model.H,
                                   y,
                                   raw_model.y,
                                   S,
                                   cv=cv)
    all_results['Raw'] = out_metrics
    G = nx.from_numpy_matrix(X, create_using=nx.DiGraph)
    Gd = nx.from_numpy_matrix(X + 1e-9, create_using=nx.DiGraph)
    models = {}
    if N2VC_available:
        models['node2vec'] = node2vec(d=d,
                                      max_iter=10,
                                      walk_len=80,
                                      num_walks=10,
                                      con_size=10,
                                      ret_p=1,
                                      inout_p=1)
    models['HOPE'] = HOPE(d=d, beta=0.01)
    models['Laplacian Eigenmaps'] = LaplacianEigenmaps(d=d)
    for model_name, embedding in models.items():
        if model_name == 'node2vec':
            Xh, t = embedding.learn_embedding(graph=Gd,
                                              edge_f=None,
                                              is_weighted=True,
                                              no_python=True)
            MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(
                Gd, embedding, Xh, is_undirected=False, is_weighted=False)
        else:
            Xh, t = embedding.learn_embedding(graph=G,
                                              edge_f=None,
                                              is_weighted=True,
                                              no_python=True)
            MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(
                G, embedding, Xh, is_undirected=False, is_weighted=False)
        Xh = np.real(Xh)
        if y is not None:
            clf = RandomForestClassifier(n_estimators=200)
            # Note: the next line replaces the random forest with an MLP classifier,
            # so the cross-validation scores below are computed with the MLP
            clf = MLPClassifier(alpha=1, max_iter=100000)
            clusterer = GaussianMixture(n_components=Xh.shape[1])
            clusterer.fit(Xh)
            predict_labels = clusterer.predict(Xh)
            scores = cross_val_score(clf, Xh, y, cv=cv)
            out_metrics['CV'] = scores.mean()
            if S is not None:
                scores = cross_val_score(clf, np.hstack((Xh, S)), y, cv=cv)
                out_metrics['CVAnatomy+Graph'] = scores.mean()
                scores = cross_val_score(clf, S, y, cv=cv)
                out_metrics['CVAnatomyOnly'] = scores.mean()
            out_metrics['ARC Clustering'] = metrics.adjusted_rand_score(
                y, predict_labels)
            out_metrics['AMI Clustering'] = metrics.adjusted_mutual_info_score(
                y, predict_labels)
        out_metrics['MAP'] = MAP
        print(model_name, out_metrics)
        all_results[model_name] = out_metrics
    return all_results
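A hypothetical call, assuming ne_graph is a NEGraph exposing the .G, .X, .y, .S, and .names attributes used above:

results = benchmark(ne_graph, cv=5)
print(results['HOPE']['MAP'])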
Code example #17
File: test.py Project: THUfl12/NETest
Y, t = embedding.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=True,
                                 no_python=True)
print('Graph Factorization:\n\tTraining time: %f' % t)

# Locally Linear Embedding
embedding = lle(2)  # d
Y, t = embedding.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=True,
                                 no_python=True)
print('Locally Linear Embedding:\n\tTraining time: %f' % t)

# Hope
embedding = HOPE(4, 0.01)  # d, beta
Y, t = embedding.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=True,
                                 no_python=True)
print('HOPE:\n\tTraining time: %f' % t)

# Laplacian Eigen maps
embedding = lap(4)  # d
Y, t = embedding.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=True,
                                 no_python=True)
print('Laplacian Eigenmaps:\n\tTraining time: %f' % t)

# sdne
Code example #18
    def HOPE(netData, **kwargs):
        d = kwargs.get('d', 4)
        beta = kwargs.get('beta', 0.01)
        from gem.embedding.hope import HOPE
        emb = HOPE(d=d, beta=beta)
        return attMethods.GEMexport(netData, emb)
Code example #19
                embedding._X = X
                AP, ROC = evaluation_measures.calc_aproc_us(
                    embedding, X, train_digraph, test_digraph, sample_edges)
                AP_VERSE[it2][it1] = AP
                ROC_VERSE[it2][it1] = ROC

            print("evaluating for HOPE")
            for it2 in range(len(dimensions)):
                print(it1, it2)
                dim = dimensions[it2]
                file_name = 'SAVER/' + fig_name[fig] + str(
                    it1 + 1) + '/HOPE_' + str(dim)
                parameter_file = open(file_name, 'rb')
                X = pickle.load(parameter_file)
                parameter_file.close()
                embedding = HOPE(d=dim, beta=0.01)
                embedding._X = X
                AP, ROC = evaluation_measures.calc_aproc_us(
                    embedding, X, train_digraph, test_digraph, sample_edges)
                AP_HOPE[it2][it1] = AP
                ROC_HOPE[it2][it1] = ROC

        mean_LE = np.mean(AP_LE, axis=1)
        std_LE = np.std(AP_LE, axis=1)
        mean_DEEPWALK = np.mean(AP_DEEPWALK, axis=1)
        std_DEEPWALK = np.std(AP_DEEPWALK, axis=1)
        mean_n2vA = np.mean(AP_n2vA, axis=1)
        std_n2vA = np.std(AP_n2vA, axis=1)
        mean_n2vB = np.mean(AP_n2vB, axis=1)
        std_n2vB = np.std(AP_n2vB, axis=1)
        mean_VERSE = np.mean(AP_VERSE, axis=1)
Code example #20
# File that contains the edges. Format: source target
# Optionally, you can add weights as third column: source target weight
edge_f = 'data/karate.edgelist'
# Specify whether the edges are directed
isDirected = True

# Load graph
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=isDirected)
G = G.to_directed()

models = []
# You can comment out the methods you don't want to run
models.append(
    GraphFactorization(d=2, max_iter=100000, eta=1 * 10**-4, regu=1.0))
models.append(HOPE(d=4, beta=0.01))
models.append(LaplacianEigenmaps(d=2))
models.append(LocallyLinearEmbedding(d=2))
#models.append(node2vec(d=2, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1))
#models.append(SDNE(d=2, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3,n_units=[50, 15,], rho=0.3, n_iter=50, xeta=0.01,n_batch=500,
#                modelfile=['./intermediate/enc_model.json', './intermediate/dec_model.json'],
#                weightfile=['./intermediate/enc_weights.hdf5', './intermediate/dec_weights.hdf5']))

for embedding in models:
    print('Num nodes: %d, num edges: %d' %
          (G.number_of_nodes(), G.number_of_edges()))
    t1 = time()
    # Learn embedding - accepts a networkx graph or file with edge list
    Y, t = embedding.learn_embedding(graph=G,
                                     edge_f=None,
                                     is_weighted=True,
Code example #21
def get_embeddings(graph,
                   embedding_algorithm_enum,
                   dimension_count,
                   hyperparameter,
                   lower=None,
                   higher=None):
    """Generate embeddings. """

    if embedding_algorithm_enum is EmbeddingType.LocallyLinearEmbedding:
        embedding_alg = LocallyLinearEmbedding(d=dimension_count)
    elif embedding_algorithm_enum is EmbeddingType.Hope:
        embedding_alg = HOPE(d=dimension_count, beta=0.01)
    elif embedding_algorithm_enum is EmbeddingType.GF:
        embedding_alg = GraphFactorization(d=dimension_count,
                                           max_iter=100000,
                                           eta=1 * 10**-4,
                                           regu=1.0)
    elif embedding_algorithm_enum is EmbeddingType.LaplacianEigenmaps:
        embedding_alg = LaplacianEigenmaps(d=dimension_count)

    elif embedding_algorithm_enum is EmbeddingType.DegreeNeigDistributionWithout:
        A = np.array([
            np.histogram([graph.degree(neig) for neig in graph.neighbors(i)],
                         bins=dimension_count,
                         density=True,
                         range=(lower, higher))[0] for i in graph.nodes()
        ])
        A = (A - A.mean(axis=0)) / A.std(axis=0)
        return A

    elif embedding_algorithm_enum is EmbeddingType.DegreeNeigDistribution:
        A = np.array([
            np.concatenate([
                np.array([graph.degree(i) / (higher * dimension_count)]),
                np.histogram(
                    [graph.degree(neig) for neig in graph.neighbors(i)],
                    bins=dimension_count - 1,
                    density=True,
                    range=(lower, higher))[0]
            ],
                           axis=0) for i in graph.nodes()
        ])
        A = (A - A.mean(axis=0)) / A.std(axis=0)
        return A

    elif embedding_algorithm_enum is EmbeddingType.DegreeNeigNeigDistribution:
        bin_length = int(dimension_count / 2)

        A = np.array([
            np.concatenate([
                np.array([graph.degree(i) / (higher)]),
                np.histogram(
                    [graph.degree(neig) for neig in graph.neighbors(i)],
                    bins=bin_length,
                    density=True,
                    range=(lower, higher))[0],
                np.histogram([
                    graph.degree(neigneig) for neig in graph.neighbors(i)
                    for neigneig in graph.neighbors(neig)
                ],
                             bins=bin_length,
                             density=True,
                             range=(lower, higher))[0]
            ],
                           axis=0) for i in graph.nodes()
        ])

        A = (A - A.mean(axis=0)) / A.std(axis=0)
        # Scale only the last histogram block (neighbor-of-neighbor degrees) by the
        # hyperparameter; the degree and first-neighbor blocks are left unchanged
        A[:, 2 + bin_length:] = A[:, 2 + bin_length:] * hyperparameter
        A = np.nan_to_num(A)
        return A
    else:
        raise NotImplementedError

    A, t = embedding_alg.learn_embedding(graph=graph, no_python=True)

    A = np.dot(A, np.diag(np.sign(np.mean(A, axis=0))))
    A = (A - A.mean(axis=0)) / A.std(axis=0)
    return A
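A hypothetical call, assuming a networkx graph g and the EmbeddingType enum referenced above:

embeddings = get_embeddings(g,
                            EmbeddingType.Hope,
                            dimension_count=32,
                            hyperparameter=1.0)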