Example #1
import pickle
import time

import networkx as nx
from gensim.models import KeyedVectors
from nodevectors import Node2Vec
from tqdm import tqdm

# Load a previously trained Node2Vec model (nodevectors saves models as .zip archives)
g2v = Node2Vec.load(
    'C:/Users/DI_Lab/Desktop/연구실 자료/국보연/전지원/word2vec_20000test.model.zip')

# Export the embeddings in gensim KeyedVectors (word2vec) format
g2v.save_vectors("wheel_model.bin")
print(g2v)

# Reload the exported vectors in gensim and look up one node's embedding
model = KeyedVectors.load_word2vec_format("wheel_model.bin")
print(model)
model["cve-2019-1020019"]  # node IDs are stored as strings
# Persist the small-graph embeddings (n2v comes from an earlier training step omitted in this excerpt)
pickle.dump(n2v, open("./graphs/n2v_sub_small1.pkl", "wb"))

# Optionally (re)train the computationally expensive embeddings for the larger graphs
train_comp_expensive = False
if train_comp_expensive:
    graphs_subset = [
        x for x in graphs if len(related[graphs.index(x)]) > 50
    ]
    for i, graph in tqdm(enumerate(graphs_subset),
                         total=len(graphs_subset),
                         leave=False):
        n2v = learn_embeddings(graph)
        n2v.save(f"./graphs/n2v_sub_huge-{i+1}.pckl")
        # pickle.dump(n2v, open(f"./graphs/n2v_sub_huge-{i+1}.pkl", "wb"))

# Export the large-graph embeddings to gensim word2vec format
save_huge_vecs = False
if save_huge_vecs:
    graphs = [
        Node2Vec.load(f"./graphs/huge_graphs/n2v_sub_huge-{i}.pckl.zip")
        for i in range(1, 6)
    ]
    for i, graph in enumerate(graphs):
        graph.save_vectors(f"./vectors/wheel_mode_graph-{i}.bin")

# Export the small-graph embeddings, continuing the file numbering
save_small_vecs = False
if save_small_vecs:
    if not save_huge_vecs:
        i = 5  # start numbering after the five huge-graph vector files
    small_graphs = pickle.load(open("./graphs/n2v_sub_small1.pkl", "rb"))
    for j, graph in enumerate(small_graphs):
        graph.save_vectors(f"./vectors/wheel_mode_graph-{i+j}.bin")
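Both branches above assume that the embeddings have already been trained; the excerpt only loads, re-saves, and exports them. For reference, a minimal sketch of how such a model could be trained and persisted with nodevectors' documented API (the wheel graph, hyperparameters, and file names below are placeholders, not the original project's data):

import networkx as nx
from nodevectors import Node2Vec

# Toy graph standing in for the real graph used above
G = nx.generators.classic.wheel_graph(100)

# Fit a Node2Vec embedding; dimensions and walk length are illustrative
g2v = Node2Vec(n_components=32, walklen=10)
g2v.fit(G)

# Persist the whole model (nodevectors appends .zip automatically) ...
g2v.save("n2v_demo")
# ... and export just the vectors in gensim word2vec format
g2v.save_vectors("wheel_model.bin")

# Embedding for a single node
print(g2v.predict(42))

Note that .save() writes <name>.zip, which is why the loading calls in the excerpt pass explicit .zip paths, while Node2Vec.load() expects the full path including the extension.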
Example #3
import pickle

import pandas as pd
from nodevectors import Node2Vec

model_name = "keywords_deep"
idx_file = "data/word_index.pickle"
keywords_file = "data/mag_cs_keywords.csv"

# Load in relevant data and modules
keywords_full_data = pd.read_csv(keywords_file)
keywords_full_data['normalizedName'] = keywords_full_data[
    'normalizedName'].fillna('nan')
keywords_data = keywords_full_data['normalizedName']

with open(idx_file, 'rb') as f:
    word_to_idx = pickle.load(f)

# Load the trained keyword embeddings
keyword_embs = Node2Vec.load(model_name + ".zip")

# Process word queries
while True:
    print("Please enter a word to search: ")
    query_word = input()

    query_node_idx = -1
    query_node = None

    while query_node_idx < 0:
        try:
            query_node_idx = word_to_idx[query_word.lower()]
            query_node = keyword_embs.predict(query_node_idx)
        except KeyError:
            # Minimal completion (the excerpt is truncated here):
            # report the miss and prompt for another keyword.
            print("Word not found in the keyword index, please try another: ")
            query_word = input()
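The excerpt stops inside the lookup loop, right after the query embedding is obtained. A typical next step would be to rank the other keywords by cosine similarity to that embedding. The sketch below assumes the word_to_idx dictionary and keyword_embs model defined above; the helper name, the k parameter, and the brute-force scan are illustrative, not the original project's code:

import numpy as np

def most_similar_keywords(query_word, k=5):
    """Return the k keywords whose embeddings are closest (cosine) to the query.

    Brute-force over the whole vocabulary, so only suitable for
    moderately sized keyword sets.
    """
    q = np.asarray(keyword_embs.predict(word_to_idx[query_word.lower()]))
    q = q / np.linalg.norm(q)
    scores = []
    for word, idx in word_to_idx.items():
        v = np.asarray(keyword_embs.predict(idx))
        sim = float(np.dot(q, v) / np.linalg.norm(v))
        scores.append((sim, word))
    scores.sort(reverse=True)
    # Drop the query itself, which always scores ~1.0
    return [w for s, w in scores if w != query_word.lower()][:k]

print(most_similar_keywords("machine learning"))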