import random
from time import time

import numpy as np
from gem.embedding.hope import HOPE

# parse_args, parse_graph and save_embedding are helpers assumed to be
# defined (or imported) elsewhere in the source module.


def main():
    """Main execution steps
    Reads arguments, fixes random seeding, executes the HOPE model
    and saves resulting embeddings.
    """
    args = parse_args()

    # Set random seed if specified
    if args.seed is not None:
        np.random.seed(args.seed)
        random.seed(args.seed)
    # numpy seeds must lie in [0, 2**32 - 1]; draw one seed per embedding run
    args.seed = [
        np.random.randint(2**32 - 1) for _ in range(args.num_embeddings)
    ]

    # Parse graph data
    graph_name, graph = parse_graph(args.dataset, args.largest_cc)
    graph = graph.to_directed()

    # Compute embeddings
    model = HOPE(d=args.dimensions, beta=args.beta)

    print("Num nodes: %d, num edges: %d" %
          (graph.number_of_nodes(), graph.number_of_edges()))
    times = []
    for i in range(args.num_embeddings):
        t1 = time()

        # Set the seed before learning
        np.random.seed(args.seed[i])
        random.seed(args.seed[i])

        Y, t = model.learn_embedding(graph=graph,
                                     edge_f=None,
                                     is_weighted=True,
                                     no_python=True)
        times.append(time() - t1)

        # save embedding
        file_path = f"{args.outputdir}/hope_{graph_name}_{i:03d}.emb"
        print(f"Saving embedding to {file_path}")
        save_embedding(
            Y, file_path, {
                "algorithm": "hope",
                "dimension": args.dimensions,
                "beta": args.beta,
                "seed": args.seed[i],
            })

    print("%s\n\tAverage training time: %f" %
          (model._method_name, sum(times) / len(times)))
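
# For reference, HOPE builds a high-order proximity matrix (commonly the Katz
# index with decay factor beta) and factorizes it with a truncated SVD; the
# left and right singular vectors give source and target embeddings. The
# sketch below only illustrates that idea with plain numpy/scipy -- it is not
# GEM's implementation, and hope_sketch and the random test graph are
# illustrative names, not part of the original example.
import networkx as nx
from scipy.sparse.linalg import svds


def hope_sketch(graph, d=4, beta=0.01):
    A = nx.to_numpy_array(graph)                           # adjacency matrix
    n = A.shape[0]
    S = np.linalg.inv(np.eye(n) - beta * A) @ (beta * A)   # Katz proximity
    u, s, vt = svds(S, k=d // 2)                           # truncated SVD
    X_source = u @ np.diag(np.sqrt(s))
    X_target = vt.T @ np.diag(np.sqrt(s))
    return np.concatenate([X_source, X_target], axis=1)    # shape (n, d)


Y_sketch = hope_sketch(nx.gnp_random_graph(50, 0.1, directed=True))
print(Y_sketch.shape)  # (50, 4)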
Example #2
import pandas
from gem.embedding.hope import HOPE
from gem.utils import graph_util


def main(data_set_name):
    dimensions = 4
    input_file = './graph/' + data_set_name + '.tsv'
    output_file = './emb/' + data_set_name + '.emb'
    # Instantiate the embedding method with its hyperparameters (d, beta)
    hope = HOPE(dimensions, 1)

    # Load graph
    graph = graph_util.loadGraphFromEdgeListTxt(input_file)

    # Learn embedding - accepts a networkx graph or file with edge list
    embeddings_array, t = hope.learn_embedding(graph,
                                               edge_f=None,
                                               is_weighted=True,
                                               no_python=True)

    embeddings = pandas.DataFrame(embeddings_array)
    embeddings.to_csv(output_file, sep=' ', na_rep=0.1)
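
# Usage sketch: assuming a tab-separated edge list exists under ./graph/,
# the function above can be invoked as below ('karate' is a placeholder
# dataset name, not part of the original example).
if __name__ == '__main__':
    main('karate')  # expects ./graph/karate.tsv; writes ./emb/karate.emb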
Example #3
    def emb(self):
        # Skip the computation if the embedding file already exists
        try:
            open("emb_" + self.name + ".pkl").close()
            return
        except FileNotFoundError:
            res = []

            for key in self.keys:
                g = self.graph[key]

                # Rebuild a networkx graph from the stored adjacency structure
                s = nx.from_dict_of_dicts(g._adj)
                if self.type_emb == "hope":
                    embedding = HOPE(d=self.r_d, beta=0.01)
                else:
                    print("Wrong type!!!")
                    return
                Y, t = embedding.learn_embedding(graph=s,
                                                 edge_f=None,
                                                 is_weighted=True,
                                                 no_python=True)
                # Second moment: the d x d Gram matrix of the embedding
                Y = np.asarray(Y)
                f = Y.T @ Y
                x = f
                if self.moment == 3:
                    # Third-moment option: square the Gram matrix
                    x = f @ f

                # Flatten into a single column vector
                x = x.reshape(x.size, 1)
                res.append(x)
            pickle.dump(res, open("emb_" + str(self.name) + ".pkl", "wb"))


# emb=Create_emb("/home/polina/PycharmProjects/Multi_graphs/enron400_graph.pkl","try")
# emb.emb()
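# Loading sketch: the pickle written by emb() holds one flattened Gram-matrix
# column vector per key. The "try" file name below matches the commented-out
# example above and is otherwise an assumption.
# import pickle
# with open("emb_try.pkl", "rb") as fh:
#     vectors = pickle.load(fh)
# print(len(vectors), vectors[0].shape)  # one (r_d*r_d, 1) vector per key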
Example #4
    def learn_embeddings(self):
        hope = HOPE(d=self.dim, beta=0.01)
        hope.learn_embedding(self.graph)

        self.embeddings = hope.get_embedding()
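
# Context sketch: a minimal self-contained version of the pattern above.
# The HopeWrapper class and the karate-club test graph are illustrative
# assumptions; only the HOPE calls mirror the original snippet.
import networkx as nx
from gem.embedding.hope import HOPE


class HopeWrapper:
    def __init__(self, graph, dim=4):
        self.graph = graph
        self.dim = dim
        self.embeddings = None

    def learn_embeddings(self):
        hope = HOPE(d=self.dim, beta=0.01)
        hope.learn_embedding(self.graph)
        self.embeddings = hope.get_embedding()


wrapper = HopeWrapper(nx.karate_club_graph().to_directed(), dim=4)
wrapper.learn_embeddings()
print(wrapper.embeddings.shape)  # (34, 4): d/2 source + d/2 target dimensions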
Example #5
"""安装GEM的包
https://github.com/palash1992/GEM
"""
#encoding='utf-8'
from gem.utils import graph_util
from gem.evaluation import evaluate_graph_reconstruction as gr
from time import time
from gem.embedding.hope import HOPE
edge_f = './data/load_rename.csv'
isDirected = True
# Load the graph
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=isDirected)
G = G.to_directed()
# Run the model
print('Num nodes: %d, num edges: %d' %
      (G.number_of_nodes(), G.number_of_edges()))
t1 = time()
# Hyperparameters chosen via grid_search
embedding = HOPE(d=80, beta=0.01)
# Learn embedding - accepts a networkx graph or file with edge list
Y, t = embedding.learn_embedding(graph=G, is_weighted=True)
print(Y)
# # Evaluate on graph reconstruction
# MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(G, embedding, Y, None)
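# Sketch: the commented-out reconstruction check above can be enabled as
# below (MAP is mean average precision); this follows the commented call and
# uses the gr alias imported at the top of this example.
MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(
    G, embedding, Y, None)
print('MAP: %f' % MAP)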
Example #6
# Graph Factorization (the constructor call was truncated above in this snippet)
Y, t = embedding.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=True,
                                 no_python=True)
print('Graph Factorization:\n\tTraining time: %f' % t)

# Locally Linear Embedding
embedding = lle(2)  # d
Y, t = embedding.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=True,
                                 no_python=True)
print('Locally Linear Embedding:\n\tTraining time: %f' % t)

# HOPE
embedding = HOPE(4, 0.01)  # d, beta
Y, t = embedding.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=True,
                                 no_python=True)
print('HOPE:\n\tTraining time: %f' % t)

# Laplacian Eigenmaps
embedding = lap(4)  # d
Y, t = embedding.learn_embedding(graph=G,
                                 edge_f=None,
                                 is_weighted=True,
                                 no_python=True)
print('Laplacian Eigenmaps:\n\tTraining time: %f' % t)

# SDNE
embedding = SDNE(d=2,
                 beta=5,
                 alpha=1e-5,