Пример #1
0
def sdne_run():
    """Train an SDNE embedding on the Wiki graph and plot the result.

    Reads the edge list, fits the model with the fixed hyper-parameter
    set below, then visualises the learned node embeddings against the
    Wiki node labels.
    """
    graph = read_graph('wiki/Wiki_edgelist.txt')

    # Hyper-parameters are identical to the original configuration.
    model = sdne(
        Graph=graph,
        dimension_size=128,
        per_vertex=100,
        walk_length=10,
        window_size=5,
        work=1,
        beta=5,
        alpha=1e-6,
        verbose=1,
        epochs=1000,
        batch_size=512,
        log_dir='logs/0/',
        hidden_size_list=[256, 128],
        l1=1e-5,
        l2=1e-4,
    )
    model.train()
    node_embeddings = model.get_embeddings()

    from kon.model.embedding.util.evaluate import evaluate_tools
    evaluator = evaluate_tools(node_embeddings, label_path='wiki/Wiki_labels.txt')
    evaluator.plot_embeddings()
Пример #2
0
def deep_walk_run(edgelist_path, is_evluate=False):
    """Run DeepWalk on the graph read from *edgelist_path*.

    Args:
        edgelist_path: Path to the edge-list file passed to ``read_graph``.
        is_evluate: If True, also plot the embeddings against the Wiki
            labels file. (Parameter name is a misspelling of
            ``is_evaluate`` — kept as-is for backward compatibility with
            existing callers.)

    Returns:
        The embedding mapping produced by ``DeepWalk.transform``.
    """
    graph = read_graph(edgelist_path)

    deepwalk = DeepWalk(Graph=graph,
                        per_vertex=80,
                        walk_length=10,
                        window_size=5,
                        dimension_size=8,
                        work=4)
    embeddings = deepwalk.transform()
    if is_evluate:
        # Renamed from `eval`, which shadowed the Python builtin.
        eval_tool = evaluate_tools(embeddings=embeddings,
                                   label_path='wiki/Wiki_labels.txt')
        eval_tool.plot_embeddings()
    return embeddings
Пример #3
0
def node2vec_run():
    """Train node2vec embeddings on the Wiki graph and plot them."""
    graph = read_graph('wiki/Wiki_edgelist.txt')

    # p and q bias the random walks (return vs. in-out parameters).
    model = node2vec(Graph=graph,
                     per_vertex=80,
                     walk_length=10,
                     window_size=5,
                     dimension_size=128,
                     work=1,
                     p=0.25,
                     q=4)

    node_embeddings = model.transform()
    evaluator = evaluate_tools(node_embeddings, label_path='wiki/Wiki_labels.txt')
    evaluator.plot_embeddings()
Пример #4
0
def line_run():
    """Train a LINE model on the Wiki graph and visualise the embeddings."""
    from kon.model.embedding.util.util_tool import read_graph
    import os

    # Show the working directory so the relative data path can be checked.
    print(os.getcwd())

    graph = read_graph('wiki/Wiki_edgelist.txt')
    model = Line(
        Graph=graph,
        dimension_size=128,
        per_vertex=100,
        walk_length=10,
        window_size=5,
        work=1,
        negative_ratio=1,
        batch_size=128,
        log_dir='logs/0/',
        epoch=100,
    )
    node_embeddings = model.transform()

    from kon.model.embedding.util.evaluate import evaluate_tools
    evaluator = evaluate_tools(node_embeddings, label_path='wiki/Wiki_labels.txt')
    evaluator.plot_embeddings()
Пример #5
0
        for num in tqdm(range(self.walk_epoch), desc='walk epoch'):
            random.shuffle(self.all_nodes)
            for vertex in tqdm(self.all_nodes, desc='generator node walk seq'):
                sentence_list.append(self.random_walk(start_vertex=vertex))

        return sentence_list

    def transform(self):
        """Generate random-walk sentences and train embeddings over them.

        Returns:
            The embeddings produced by ``embdding_train`` from the walk
            sequences of ``deepwalk``.
        """
        # Walk generation feeds directly into embedding training.
        return self.embdding_train(self.deepwalk())


if __name__ == '__main__':
    # Demo: train DeepWalk on the Wiki graph and visualise the embeddings.
    Graph = read_graph('wiki/Wiki_edgelist.txt')

    deepwalk = DeepWalk(Graph=Graph,
                        per_vertex=80,
                        walk_length=10,
                        window_size=5,
                        dimension_size=64,
                        work=4)

    embeddings = deepwalk.transform()
    print(embeddings.keys())
    print(embeddings.values())
    # Renamed from `eval`, which shadowed the Python builtin of that name.
    eval_tool = evaluate_tools(embeddings=embeddings)
    eval_tool.plot_embeddings()
Пример #6
0
        return self.model

    def get_embeddings(self):
        """Map each original node id to its learned embedding vector.

        Runs the embedding sub-model over ``self.W`` in batches of
        ``self.batch_size`` and keys each output row by the node id that
        ``self.idx2node`` assigns to that row index.

        Returns:
            dict: node id -> embedding row from ``predict`` (row order
            preserved, as in the original manual-counter loop).
        """
        pred_embeddings = self.embedding_model.predict(
            self.W, batch_size=self.batch_size)

        # enumerate() replaces the hand-rolled `rank` counter.
        return {self.idx2node[rank]: embedding
                for rank, embedding in enumerate(pred_embeddings)}


if __name__ == '__main__':
    # Demo entry point: load the graph (default path inside read_graph)
    # and configure an SDNE model with the fixed hyper-parameters below.
    # NOTE(review): no train()/get_embeddings() call is visible here —
    # this chunk may be truncated; confirm against the full file.
    Graph = read_graph()
    sden_model = sdne(Graph=Graph,
                      dimension_size=128,
                      per_vertex=100,
                      walk_length=10,
                      window_size=5,
                      work=1,
                      beta=5,          # weight on non-zero reconstruction errors
                      alpha=1e-6,      # first-order proximity loss weight
                      verbose=1,
                      epochs=1000,
                      batch_size=512,
                      log_dir='model/embedding/setence_model/logs/0/',
                      hidden_size_list=[256, 128],
                      l1=1e-5,         # L1 regularisation strength
                      l2=1e-4)         # L2 regularisation strength