Code Example #1
                                       node_position_path='./data')
    model = Model(G.degree(),
                  size=representation_size,
                  table_size=5000000,
                  input_file=os.path.join(input_file, input_file + "_zachary"),
                  path_labels="./data")

    model.node_color = node_color

    # Sampling the random walks for context
    if sampling_path:
        log.info("sampling the paths")
        walk_files = graph_utils.write_walks_to_disk(
            G,
            walks_filebase,
            num_paths=number_walks,
            path_length=walk_length,
            alpha=0,
            rand=random.Random(9999999999),
            num_workers=num_workers)
    else:
        # Reuse walk files that were already sampled and written to disk
        walk_files = [
            walks_filebase + '.' + str(i) for i in range(number_walks)
            if os.path.isfile(walks_filebase + '.' + str(i))
        ]

    # Learning algorithm
    node_learner = Node2Vec(workers=num_workers, negative=negative, lr=lr)
    cont_learner = Context2Vec(window_size=window_size,
                               workers=num_workers,
                               negative=negative,
                               lr=lr)
Code Example #2
File: main.py  Project: abegehr/ASDS_ComE
    ks = [2]  # number of communities
    walks_filebase = os.path.join(
        'data', input_file)  # where the sampled walks are read/written

    # CONSTRUCT THE GRAPH
    # G = graph_utils.load_matfile(os.path.join('./data', input_file, input_file + '.mat'), undirected=True)
    G = graph_utils.load_csv_edges(os.path.join('./data', input_file,
                                                input_file + '.csv'),
                                   undirected=True)
    # Sampling the random walks for context
    log.info("sampling the paths")
    walk_files = graph_utils.write_walks_to_disk(
        G,
        os.path.join(walks_filebase, "{}.walks".format(output_file)),
        num_paths=number_walks,
        path_length=walk_length,
        alpha=0,
        rand=random.Random(0),
        num_workers=num_workers)

    # Count how often each node id occurs across the sampled walk files
    vertex_counts = graph_utils.count_textfiles(walk_files, num_workers)
    model = Model(vertex_counts,
                  size=representation_size,
                  down_sampling=down_sampling,
                  table_size=100000000,
                  input_file=os.path.join(input_file, input_file),
                  path_labels="./data")

    # Learning algorithm
    node_learner = Node2Vec(workers=num_workers, negative=negative, lr=lr)
    cont_learner = Context2Vec(window_size=window_size,
Code Example #3
                                       graph_name="karate",
                                       node_position_file=True,
                                       node_position_path='./data')

    example_filebase = os.path.join(
        "./data/",
        output_file + ".example")  # where the sampled walks are read/written
    num_iter = G.number_of_nodes() * num_walks * walk_length

    # Sampling the random walks for context
    log.info("sampling the paths")
    examples_files = graph_utils.write_walks_to_disk(
        G,
        example_filebase,
        windows_size=window_size,
        num_paths=num_walks,
        path_length=walk_length,
        alpha=0,
        rand=random.Random(9999999999),
        num_workers=num_workers)
    # Store each undirected edge in both directions
    edges = np.array(G.edges())
    edges = np.concatenate((edges, np.fliplr(edges)))

    io_utils.save_embedding(model.get_node_embedding(),
                            "pytorch_embedding_random",
                            path="./data")

    # pre-training phase
    learn_second(o2_loss,
                 lr,
                 model,