Example #1
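# The snippets below assume `my_tokens` (a list of preprocessed terms) and a
# graph-of-words `g` have already been built earlier in the exercise, e.g. via
# g = terms_to_graph(my_tokens, 4). The first half uses igraph; the second
# half repeats the same steps with networkx.

# A minimal sketch of what terms_to_graph is assumed to do: slide a window of
# size w over the token list and connect co-occurring terms with an edge whose
# weight counts their co-occurrences. The actual course helper may differ
# (e.g. directed edges or a different weighting scheme).
import igraph

def terms_to_graph_sketch(terms, window_size):
    cooc = {}
    for i in range(len(terms)):
        for j in range(i + 1, min(i + window_size, len(terms))):
            u, v = terms[i], terms[j]
            if u == v:
                continue  # no self-loops
            pair = tuple(sorted((u, v)))
            cooc[pair] = cooc.get(pair, 0) + 1
    g = igraph.Graph()
    g.add_vertices(sorted(set(terms)))    # one vertex per unique term
    g.add_edges(list(cooc.keys()))        # edges can be given by vertex name
    g.es['weight'] = list(cooc.values())  # co-occurrence counts as weights
    return g
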
# the number of nodes should be equal to the number of unique terms
assert len(g.vs) == len(set(my_tokens))

edge_weights = []
for edge in g.es:
    source = g.vs[edge.source]['name']
    target = g.vs[edge.target]['name']
    weight = edge['weight']
    edge_weights.append([source, target, weight])

print(edge_weights)
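
# The same list can be built more compactly with igraph's attribute accessors
# (edge.source / edge.target are vertex ids, g.vs[...]['name'] maps them back
# to terms):
edge_weights_alt = [[g.vs[e.source]['name'], g.vs[e.target]['name'], e['weight']]
                    for e in g.es]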

for w in range(2, 10):
    # build a graph-of-words g from my_tokens with window size w
    g = terms_to_graph(my_tokens, w)
    print(g.density())
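
# Note: density should not decrease as w grows, since the vertex set (unique
# terms) is fixed while larger windows can only add co-occurrence edges.
# Manual cross-check of igraph's density for a simple undirected graph,
# density = 2*|E| / (|V|*(|V|-1)), computed on the last graph built above:
print(2 * g.ecount() / (g.vcount() * (g.vcount() - 1)))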

# decompose g
core_numbers = unweighted_k_core(g)
print(core_numbers)
# compare with igraph method
print(dict(zip(g.vs['name'], g.coreness())))
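
# A sketch of the peeling algorithm that unweighted_k_core is assumed to
# implement: repeatedly remove a minimum-degree vertex; a vertex's core number
# is the largest minimum degree observed up to its removal. It returns a dict
# mapping each term to its core number, which should agree with g.coreness().
def unweighted_k_core_sketch(g):
    gg = g.copy()
    core_numbers = {}
    current_core = 0
    while gg.vcount() > 0:
        degrees = gg.degree()
        v = degrees.index(min(degrees))            # a minimum-degree vertex
        current_core = max(current_core, degrees[v])
        core_numbers[gg.vs[v]['name']] = current_core
        gg.delete_vertices(v)
    return core_numbers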

# retain main core as keywords
max_c_n = max(core_numbers.values())
keywords = [
    word for word in core_numbers.keys() if core_numbers[word] == max_c_n
]  # keep only the terms whose core number equals the maximum
print(keywords)
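
# --- networkx version of the same exercise ---

# A networkx counterpart of the terms_to_graph sketch above (assumption: the
# same sliding-window co-occurrence scheme; the actual helper used in the
# exercise may differ):
import networkx as nx

def terms_to_graph_nx_sketch(terms, window_size):
    g = nx.Graph()
    g.add_nodes_from(set(terms))          # one node per unique term
    for i in range(len(terms)):
        for j in range(i + 1, min(i + window_size, len(terms))):
            u, v = terms[i], terms[j]
            if u == v:
                continue                  # no self-loops
            if g.has_edge(u, v):
                g[u][v]['weight'] += 1    # bump the co-occurrence count
            else:
                g.add_edge(u, v, weight=1)
    return g
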
# number of edges
print(g.number_of_edges())

# the number of nodes should be equal to the number of unique terms
assert len(g.nodes) == len(set(my_tokens))

edge_weights = []
for (source, target, weight) in g.edges(data='weight'):
    edge_weights.append([source, target, weight])

print(edge_weights)

for w in range(2, 11):
    g = terms_to_graph(my_tokens, w)
    # print density of g
    print(nx.density(g))

# decompose g
core_numbers = unweighted_k_core(g)
print(core_numbers)

# compare with networkx's built-in method
cn = nx.core_number(nx.Graph(g))
print(cn)

# retain main core as keywords
max_c_n = max(core_numbers.values())
keywords = [kwd for kwd, c_n in core_numbers.items() if c_n == max_c_n]
print(keywords)
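
# Cross-check: networkx can also extract the main core directly (assumption:
# g has no self-loops, which nx.core_number / nx.k_core require).
main_core = nx.k_core(nx.Graph(g))
print(sorted(main_core.nodes()))  # should contain the same terms as `keywords`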