import re

def context_morphs(all_sentence, word_list):
    num = re.compile(r'[0-9]')
    # nouns_100: assumed global list of the target noun phrases, defined elsewhere
    for word in word_list:
        chunk_list = module(word)
        for sent in chunk_list:
            for j in range(len(sent)):
                # collect runs of consecutive nouns in this chunk
                context = []
                for k in sent[j].morph:
                    if k.pos == '名詞':
                        context.append(k.surface)
                    else:
                        if len(context) >= 2:
                            a = ' '.join(context)
                            if a in nouns_100:
                                # content words in the chunk this one depends on
                                for b in sent[int(sent[j].dst)].morph:
                                    if b.pos == '名詞' or b.pos == '動詞' or b.pos == '形容詞':
                                        print a + '\t -> ' + b.base
                                # content words in the chunks that depend on this one
                                for c in sent[j].srcs:
                                    for d in sent[int(c)].morph:
                                        if d.pos == '名詞' or d.pos == '動詞' or d.pos == '形容詞':
                                            print a + '\t <- ' + d.base
                        context = []
                # flush a noun run that reaches the end of the chunk
                if len(context) >= 2:
                    a = ' '.join(context)
                    if a in nouns_100:
                        for b in sent[int(sent[j].dst)].morph:
                            if b.pos == '名詞' or b.pos == '動詞' or b.pos == '形容詞':
                                print a + '\t -> ' + b.base
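# The dependency-parsing snippets in this file assume CaboCha-style
# Morph/Chunk containers produced by the `module` loader, which is not shown
# here. This is a minimal sketch of that assumed interface -- the field names
# (surface, base, pos, dst, srcs, morph/morphs, num) are inferred from how the
# snippets use them, not taken from a known implementation.
class Morph:
    def __init__(self, surface, base, pos):
        self.surface = surface  # surface form of the token
        self.base = base        # dictionary (base) form
        self.pos = pos          # part of speech, e.g. '名詞', '動詞'

class Chunk:
    def __init__(self, morphs, dst, srcs, num):
        self.morphs = morphs    # list of Morph objects in this phrase
        self.morph = morphs     # some snippets use this singular alias
        self.dst = dst          # index (as a string) of the chunk this one depends on
        self.srcs = srcs        # indices of the chunks that depend on this one
        self.num = num          # this chunk's own index within the sentence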
def noun_verb(FILE):
    chunk_list = module(FILE)
    for chunk in chunk_list:
        for i in range(len(chunk)):
            if chunk[i].dst != '-1':
                noun, verb = '', ''
                noun_check, verb_check = 0, 0
                # dependent chunk must contain a noun
                for j1 in chunk[i].morphs:
                    if j1.surface != '、' and j1.surface != '。':
                        noun += j1.surface
                    if j1.pos == '名詞':
                        noun_check = 1
                if noun_check == 0:
                    continue
                # head chunk must contain a verb
                for j2 in chunk[int(chunk[i].dst)].morphs:
                    if j2.surface != '、' and j2.surface != '。':
                        verb += j2.surface
                    if j2.pos == '動詞':
                        verb_check = 1
                if verb_check == 0:
                    continue
                print noun + '\t' + verb + '\n'
def main():
    print module.__dict__
    print "********** start **********"
    m = module()
    print m
    # print "general method in python other package"
    # m.details()
    print "********** establish data connection **********"
    conn = module.connection()
    print conn
def tab_extract(FILE):
    chunk_list = module(FILE)
    for chunk in chunk_list:
        for i in range(len(chunk)):
            if chunk[i].dst != '-1':
                moto, saki = '', ''
                # dependent phrase, skipping symbols/punctuation
                for j1 in chunk[i].morphs:
                    if j1.pos != '記号':
                        moto += j1.surface
                # head phrase, skipping symbols/punctuation
                for j2 in chunk[int(chunk[i].dst)].morphs:
                    if j2.pos != '記号':
                        saki += j2.surface
                print moto + '\t' + saki + '\n'
def more_than_2_phrase(FILE):
    chunk_list = module(FILE)
    for chunk in chunk_list:
        for i in range(len(chunk)):
            # only chunks with two or more dependents
            if len(chunk[i].srcs) >= 2:
                print chunk[i].srcs
                saki = ''
                for j in chunk[i].morphs:
                    saki += j.surface
                print saki + '\n'
                for src in chunk[i].srcs:
                    moto = ''
                    for k in chunk[int(src)].morphs:
                        moto += k.surface
                    print moto
def phrase_expression(FILE):
    chunk_list = module(FILE)
    for sent in chunk_list:
        # dst indexes within a single sentence, so compare chunk pairs
        # sentence by sentence rather than across the whole corpus
        for i in range(len(sent)):
            for j in range(len(sent)):
                if sent[i].dst == sent[j].num:
                    for j1 in sent[i].morphs:
                        for j2 in sent[j].morphs:
                            if j1.pos == '名詞' and j2.pos == '名詞':
                                print j1.surface + '\t' + j2.surface + '\n'
                    print sent[j].num + '->' + sent[i].num
def load_shelve(file_name, progress_bar=None):
    '''
    Load a module from disk.

    @see: dump_shelve()

    @type  file_name: String
    @param file_name: File name to import from

    @rtype:  pida.module
    @return: Imported module
    '''
    import shelve

    sh = shelve.open(file_name, flag='r', protocol=2)
    mod = module()

    # attributes from pida.module
    mod.name = sh["name"]
    mod.base = sh["base"]
    mod.depth = sh["depth"]
    mod.analysis = sh["analysis"]
    mod.signature = sh["signature"]
    mod.version = sh["version"]
    mod.ext = sh["ext"]

    # attributes inherited from pgraph.graph
    mod.id = sh["id"]
    mod.clusters = sh["clusters"]
    mod.edges = sh["edges"]
    mod.nodes = {}

    # restore the node dictionary piece by piece to avoid out-of-memory conditions
    for key, val in sh["nodes"].items():
        mod.nodes[key] = val

    sh.close()
    return mod
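# load_shelve() references a dump_shelve() counterpart that is not included in
# this file. A minimal sketch of what it could look like, assuming the same
# attribute names read back above; this is an illustration, not the actual
# pida implementation.
def dump_shelve(mod, file_name):
    import shelve
    sh = shelve.open(file_name, flag='c', protocol=2)
    # persist each attribute under the key load_shelve() expects
    for attr in ("name", "base", "depth", "analysis", "signature",
                 "version", "ext", "id", "clusters", "edges", "nodes"):
        sh[attr] = getattr(mod, attr)
    sh.close()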
import glob

def a_the_none(all_sentence):
    for one_sentence in all_sentence:
        word = ''
        pre_w = 'None'
        pre_pos = 'None'
        for one_chunk in one_sentence:
            if one_chunk.chunk == 'B-NP':
                # flush the previous noun phrase before starting a new one
                if word.startswith('#'):
                    make_feature(word, features, one_chunk.w, one_chunk.pos, pre_pos)
                head_w = find_head(one_chunk.w.lower())
                features = feature(pre_w, pre_pos, head_w, one_chunk.pos)
                word = '# ' + one_chunk.w
            elif one_chunk.chunk == 'I-NP' and word != '':
                if features.head_w != 'NONE':
                    features.f_pos = one_chunk.pos
                word += ' ' + one_chunk.w
            elif word != '':
                make_feature(word, features, one_chunk.w, one_chunk.pos, pre_pos)
                word = ''
            else:
                pass
            pre_w = one_chunk.w
            pre_pos = one_chunk.pos

if __name__ == '__main__':
    for name in glob.glob('8_071_GENIA_tagger_?.txt'):
        result = open('8_076_output' + name[-5] + '.f', 'w')
        a_the_none(module(name))
        result.close()
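# The GENIA-tagger snippets in this file iterate over records carrying .w,
# .pos and .chunk attributes ('B-NP'/'I-NP' IOB2 chunk tags). A minimal sketch
# of that assumed record type; the real loader behind `module` is not shown
# here, so this is an inferred stand-in.
from collections import namedtuple

# w = surface word, pos = part-of-speech tag, chunk = IOB2 chunk tag
TaggedToken = namedtuple('TaggedToken', ['w', 'pos', 'chunk'])

# e.g. TaggedToken(w='inhibitor', pos='NN', chunk='I-NP')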
import glob

if __name__ == '__main__':
    word_list = make_word_list()
    for num in glob.glob('7_061_output_?.txt'):
        all_sentence = module(num)
        context_morphs(all_sentence, word_list)
import sys

def a_the_none(all_sentence):
    for one_sentence in all_sentence:
        word = ''
        pre_w = 'None'
        pre_pos = 'None'
        for one_chunk in one_sentence:
            if one_chunk.chunk == 'B-NP':
                # flush the previous noun phrase before starting a new one
                if word.startswith('#'):
                    make_feature(word, features, one_chunk.w, one_chunk.pos, pre_pos)
                head_w = find_head(one_chunk.w.lower())
                features = feature(pre_w, pre_pos, head_w, one_chunk.pos)
                word = '# ' + one_chunk.w
            elif one_chunk.chunk == 'I-NP' and word != '':
                if features.head_w != 'NONE':
                    features.f_pos = one_chunk.pos
                word += ' ' + one_chunk.w
            elif word != '':
                make_feature(word, features, one_chunk.w, one_chunk.pos, pre_pos)
                word = ''
            else:
                pass
            pre_w = one_chunk.w
            pre_pos = one_chunk.pos

if __name__ == '__main__':
    a_the_none(module(sys.argv[1]))
def _start(module, args=None):
    # invoke the given module entry point with optional arguments
    module(args)
from module import *

m = module(".bashrc")
print(m)
print(m.commentCharacter)
print(m.activeFile)
print(m.homePath)

m.set_file(".vimrc")
print(m.activeFile)

m.set_file(".bashrc")
print(m.activeFile)

# print(m.check_for_file())
# m.append_to_file("DELETEME")
# m.uncomment_line("ben=")
m.backup_file(".origin")
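# A minimal sketch of the dotfile-manager interface the script above
# exercises. The real `module` class is not shown in this file; the attribute
# and method names (commentCharacter, activeFile, homePath, set_file,
# backup_file) are taken from the calls above, and the implementation below is
# only an illustration.
import os
import shutil

class DotfileManager(object):
    commentCharacter = '#'

    def __init__(self, file_name):
        self.homePath = os.path.expanduser('~')
        self.set_file(file_name)

    def set_file(self, file_name):
        # point the manager at a dotfile inside the home directory
        self.activeFile = os.path.join(self.homePath, file_name)

    def backup_file(self, suffix):
        # copy the active file aside, e.g. ~/.bashrc -> ~/.bashrc.origin
        shutil.copy2(self.activeFile, self.activeFile + suffix)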
#!/usr/bin/env python
# coding: utf-8
from module import *
import sys

def np(all_sentence):
    # print each maximal noun phrase, flagged with a leading '#'
    for one_sentence in all_sentence:
        word = ''
        for one_chunk in one_sentence:
            if one_chunk.chunk == 'B-NP':
                if word.startswith('#'):
                    print word
                word = '# ' + one_chunk.w
            elif one_chunk.chunk == 'I-NP' and word != '':
                word += ' ' + one_chunk.w
            elif word != '':
                print word
                word = ''
            else:
                pass

if __name__ == '__main__':
    np(module(sys.argv[1]))
def labels():
    text = module('Cristiano', 10)
    return str(text)
from flask import Flask
from module import *

app = Flask(__name__)

@app.route('/')
def hello_world():
    return """
    <!DOCTYPE html>
    <head>
        <title> API Exercise pt. 2 </title>
    </head>
    <body>
        <p> Andre's code is being tested with the Twitter handle 'Cristiano' and a number of 10 tweets </p>
        <p> To view the labels returned by the Google Cloud Vision API, type '/labels' after the current URL </p>
        <p> OUTPUT VIDEO as a .mp4 file: </p>
        <video controls>
            <source src="/output.mp4" type="video/mp4">
        </video>
    </body>
    """

# Only way I could think of printing a result from a module
@app.route("/labels")
def labels():
    text = module('Cristiano', 10)
    return str(text)

if __name__ == '__main__':
    text = module('Cristiano', 10)
    app.run(debug=True, use_reloader=True)
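# One way to exercise both routes without a browser is Flask's built-in test
# client. This sketch assumes the script above is saved as app.py (a
# hypothetical file name).
from app import app

client = app.test_client()
print(client.get('/').status_code)  # 200, the HTML landing page
print(client.get('/labels').data)   # label text for the 'Cristiano' query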