Example 1
def write_directed_graph(sentences):
    edges = []
    for chunks in sentences:
        for chunk in chunks:
            phrase = chunk.get_phrase()
            if chunk.dst != -1 and phrase:
                edges.append((phrase, chunks[chunk.dst].get_phrase()))
    g = pdt.graph_from_edges(edges, directed=True)
    g.write_jpeg('graph_from_edges_dot.jpg', prog='dot')
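
For reference, a minimal self-contained sketch of the same pattern; the pdt alias and the toy edge list are assumptions here, since the example above depends on a project-specific Chunk class:

import pydot_ng as pdt

# Toy edges: each pair is (source phrase, destination phrase).
edges = [('the cat', 'sat'), ('sat', 'on the mat')]

# graph_from_edges builds a Dot object; directed=True makes it a digraph.
g = pdt.graph_from_edges(edges, directed=True)

# Rendering requires Graphviz's "dot" program to be installed.
g.write_jpeg('minimal_graph.jpg', prog='dot')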
Example 2
def test_empty_list(directed, graph_type, edges):
    graph = pydot_ng.graph_from_edges(edges, directed=directed)

    assert graph.graph_type == graph_type
    assert graph.to_string() == dedent("""\
        {graph_type} G {{
        }}
        """.format(graph_type=graph_type))
    assert not graph.get_edges()
Example 3
def sentence2directed_graph(file_name, s_id):
    dependencies = get_dependencies(file_name, s_id)
    edges = []
    dep_iter = dependencies.iter('dep')
    for dep in dep_iter:
        if dep.get('type') == 'punct':
            continue
        print(dep[0].text, dep[1].text)
        edges.append((dep[0].text, dep[1].text))
    g = pdt.graph_from_edges(edges, directed=True)
    g.write_jpeg('p57_result.jpg', prog='dot')
Example 4
def test_edge_types(prefix, src, dst):
    input_edges = [(src, dst)]
    graph = pydot_ng.graph_from_edges(input_edges, node_prefix=prefix)

    edges = graph.get_edges()
    assert len(edges) == 1
    edge = edges[0]

    with_prefix = functools.partial("{0}{1}".format, prefix)

    assert edge.source == pydot_ng.quote_if_necessary(with_prefix(src))
    assert edge.destination == pydot_ng.quote_if_necessary(with_prefix(dst))
Example 5
def test_from_edge_to_string(prefix, output):
    input_edges = [(1, 2), (2, 3.14), (3.14, "a"), ("a", "ą"), ("ą", True)]

    graph = pydot_ng.graph_from_edges(input_edges, node_prefix=prefix)
    assert len(graph.get_edges()) == len(input_edges)
    assert graph.to_string() == output
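
As a rough illustration of the behaviour this test exercises, here is a small sketch with illustrative prefix and edge values (not taken from the original test): each endpoint is converted to a string, prefixed with node_prefix, and quoted only when the resulting name requires it.

import pydot_ng

# Mixed-type endpoints: ints, floats, and strings are all accepted.
mixed_edges = [(1, 2), (2, 'a')]
graph = pydot_ng.graph_from_edges(mixed_edges, node_prefix='node_')

# The DOT source now contains prefixed node names such as node_1 and node_a.
print(graph.to_string())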
Example 6
                chunks.clear()

    return result


if __name__ == '__main__':

    sentence = input('文を入力してください : ')

    # Parse the input sentence with CaboCha and save the result to a file
    with open('sentence.cabocha', 'w') as write_file:
        cabocha = CaboCha.Parser()
        write_file.write(
            cabocha.parse(sentence).toString(CaboCha.FORMAT_LATTICE))

    # Get the source and destination chunk of each dependency
    edges = []
    for line in get_chunk_list():
        for chunk in line:
            if chunk.dst != -1:
                src = chunk.surface()
                dst = line[chunk.dst].surface()
                if src != '' and dst != '':
                    edges.append((src, dst))

    # Draw the graph
    graph = pydot.graph_from_edges(edges, directed=True)
    graph.write_png('result.png')

    # filter
Example 7
from knock41 import get_sentences
import pydot_ng as pydot

if __name__ == "__main__":
	sentences = get_sentences()
	chunks = sentences[5]
	edges = []
	for idx, chunk in enumerate(chunks):
		if chunk.dst != -1 and chunk.morphs[0].pos != "記号":
			src_surface = chunk.surface
			dst_surface = chunks[chunk.dst].surface
			edges.append((src_surface, dst_surface))

	g = pydot.graph_from_edges(edges)
	g.write_png('knock44.png', prog='dot')
Example 8
            # Initialize text
            text = ""
            # Build the chunk text from its morpheme surfaces
            for word in chunk.morphs:
                if word.surface == "。":
                    continue
                text += word.surface
            # Store the destination index with the chunk text as key
            dst_dict[text] = chunk.dst
            dst_list.append(text)
        for key, value in dst_dict.items():
            if value != -1:
                dot_pair.append([key, dst_list[value]])
        break

graph = pydot.graph_from_edges(dot_pair, directed=True)
graph.write_png("result44.png")
img = Image.open("result44.png")
plt.imshow(np.asarray(img))

# Kishimoto

from IPython.display import Image, display_png
display_png(Image('result44.png'))  # display

#45 Ihara
def search(chunk):
    # Look for verbs in the chunk and record the base form and source chunk indices
    for morph in chunk.morphs:
        if morph.pos == "動詞":
            dousi = morph.base
            id_list = chunk.srcs
Example 9
File: 57.py  Project: s14t284/NLP100
def draw_graph(array, i):
    g = pydot_ng.graph_from_edges(array)
    g.write_jpeg('graph/' + str(i) + '.jpg', prog='dot')
Example 10
def show(edges):
    g = pydot.graph_from_edges(edges, directed=True)
    g.write_png('result.png')
Example 11
                # Store the index of each dependency source in its destination chunk
                for i in range(len(chunks)):
                    if chunks[i].dst > -1:
                        chunks[chunks[i].dst].srcs.append(i)
                # Return the chunks of one sentence
                return chunks
        else:
            # Store the morpheme information in the current chunk
            speech = line[line.find('\t') + 1:].replace('\n', '').split(',')
            morph = Morph(line[0:line.find('\t')], speech[6], speech[0],
                          speech[1])
            chunk.morphs.append(morph)

    return chunks_list


if __name__ == '__main__':
    sentence = input()
    cabocha_tree = cabocha_parse(sentence)
    chunks = read_chunks(cabocha_tree)
    edges = []
    for i in range(len(chunks)):
        if chunks[i].dst > -1:
            edges.append([
                chunks[i].paragraphs_str(),
                chunks[chunks[i].dst].paragraphs_str()
            ])
    print(edges)
    g = pydot_ng.graph_from_edges(edges)
    g.write_jpeg('chunk_info_graph.jpg', prog='dot')