Example #1
File: 44.py Project: tkyf/nlp100
def main():
    # Build a DOT graph description for each sentence's chunks, then print them all.
    sentences = read_and_make_chunks()
    dots = []
    for chunks in sentences:
        dots.append(make_dot(chunks))

    for dot in dots:
        print(dot)
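
The DOT strings collected above could be rendered to image files with the third-party graphviz package. The helper below is only a minimal sketch of such a step, assuming each string returned by make_dot() is a complete DOT source document; the helper name and output naming are illustrative, not part of the project.

import graphviz

def render_dots(dots, prefix='sentence'):
    # Hypothetical helper: write each DOT description to sentence_0.png, sentence_1.png, ...
    # Assumes every item in `dots` is a complete DOT source string.
    for i, dot in enumerate(dots):
        graphviz.Source(dot).render('{}_{}'.format(prefix, i), format='png', cleanup=True)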
Example #2
def main():
    # For each dependency whose dependent chunk contains a noun (名詞) and whose
    # head chunk contains a verb (動詞), print "dependent<TAB>head" with symbols stripped.
    sentences = read_and_make_chunks()
    for chunks in sentences:
        for chunk in chunks:
            dst = chunk.dst
            if dst != -1 and '名詞' in chunk and '動詞' in chunks[dst]:
                src_phrase = make_phrase_exclude_symbols(chunk)
                dst_phrase = make_phrase_exclude_symbols(chunks[dst])
                print("{}\t{}".format(src_phrase, dst_phrase))
Example #3
File: 42.py Project: tkyf/nlp100
def main():
    # Print every chunk alongside its head chunk (dst), tab-separated; chunks with
    # no head (dst == -1) get an empty right-hand column. Symbols are excluded.
    sentences = read_and_make_chunks()
    for chunks in sentences:
        for chunk in chunks:
            src_phrase = make_phrase_exclude_symbols(chunk.morphs)
            if chunk.dst != -1:
                dst_phrase = make_phrase_exclude_symbols(chunks[chunk.dst].morphs)
            else:
                dst_phrase = ''
            print('{}\t{}'.format(src_phrase, dst_phrase))
Example #4
def main():
    sentences = read_and_make_chunks()
    for chunks in sentences:
        for chunk in chunks:
            src_phrase = make_phrase_exclude_symbols(chunk.morphs)
            if chunk.dst != -1:
                dst_phrase = make_phrase_exclude_symbols(
                    chunks[chunk.dst].morphs)
            else:
                dst_phrase = ''
            print('{}\t{}'.format(src_phrase, dst_phrase))
Example #5
File: 46.py Project: tkyf/nlp100
def main():
    # Print the case frames extracted from each sentence, one per line,
    # skipping sentences that yield none.
    sentences = read_and_make_chunks()
    for sentence in sentences:
        case_patterns = extract_case_frames(sentence)
        if case_patterns:
            print('\n'.join(case_patterns))
Example #6
File: 47.py Project: tkyf/nlp100
def main():
    # Print the light-verb constructions (LVCs) extracted from each sentence,
    # one per line, skipping sentences that yield none.
    sentences = read_and_make_chunks()
    for sentence in sentences:
        lvcs = extract_LVCs(sentence)
        if lvcs:
            print('\n'.join(lvcs))
Example #7
def main():
    sentences = read_and_make_chunks()
    for chunks in sentences:
        for chunk in chunks:
            print(chunk)
Example #8
def main():
    # Print the case patterns extracted from each sentence, one per line,
    # skipping sentences that yield none.
    sentences = read_and_make_chunks()
    for sentence in sentences:
        case_patterns = extract_case_patterns(sentence)
        if case_patterns:
            print('\n'.join(case_patterns))
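
All of the examples above rely on the same project-local helpers, whose definitions are not shown: read_and_make_chunks() yields one list of Chunk objects per sentence, and make_phrase_exclude_symbols() joins a chunk's surface forms while dropping punctuation. The sketch below is only an illustration of what those helpers might look like, assuming CaboCha lattice output in a file such as neko.txt.cabocha with MeCab/IPAdic feature fields; the Morph and Chunk layouts, the file name, and every other detail here are assumptions, not the project's actual code.

class Morph:
    # One morpheme: surface form, base form, POS, and POS subcategory (assumed layout).
    def __init__(self, surface, base, pos, pos1):
        self.surface = surface
        self.base = base
        self.pos = pos
        self.pos1 = pos1


class Chunk:
    # One bunsetsu chunk: its morphemes and the index of its head chunk.
    def __init__(self, dst):
        self.morphs = []
        self.dst = dst  # index of the head chunk, -1 if it has none

    def __contains__(self, pos):
        # Allows tests such as '名詞' in chunk, as used in Example #2 (assumed behaviour).
        return any(morph.pos == pos for morph in self.morphs)


def read_and_make_chunks(path='neko.txt.cabocha'):
    # Yield one list of Chunk objects per sentence from CaboCha lattice output (assumed format).
    chunks = []
    with open(path, encoding='utf-8') as f:
        for line in f:
            line = line.rstrip('\n')
            if line.startswith('* '):
                # Dependency line: '* <index> <dst>D ...'
                chunks.append(Chunk(int(line.split()[2].rstrip('D'))))
            elif line == 'EOS':
                if chunks:
                    yield chunks
                chunks = []
            else:
                # Morpheme line: 'surface<TAB>pos,pos1,...,base,...' (IPAdic feature order).
                surface, feature = line.split('\t')
                fields = feature.split(',')
                chunks[-1].morphs.append(Morph(surface, fields[6], fields[0], fields[1]))


def make_phrase_exclude_symbols(morphs_or_chunk):
    # Join surface forms, skipping symbols (pos == '記号'); accepts a Chunk or a list of
    # Morphs, since the examples above call it both ways.
    morphs = getattr(morphs_or_chunk, 'morphs', morphs_or_chunk)
    return ''.join(morph.surface for morph in morphs if morph.pos != '記号')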