def inicia(self):
    """Build the Spanish FreeLing analysis chain (legacy 3.x-style API)."""
    freeling_dir = "/usr/local"
    data = f"{freeling_dir}/share/freeling/"
    lang = "es"
    freeling.util_init_locale("default")

    # Options for the maco analyzer: defaults are fine except the data files.
    op = freeling.maco_options("es")
    op.set_active_modules(0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)
    op.set_data_files("",
                      f"{data}{lang}/locucions.dat",
                      f"{data}{lang}/quantities.dat",
                      f"{data}{lang}/afixos.dat",
                      f"{data}{lang}/probabilitats.dat",
                      f"{data}{lang}/dicc.src",
                      f"{data}{lang}/np.dat",
                      f"{data}common/punct.dat",
                      f"{data}{lang}/corrector/corrector.dat")

    # Analyzers kept on the instance for later use.
    self.tk = freeling.tokenizer(f"{data}{lang}/tokenizer.dat")
    self.sp = freeling.splitter(f"{data}{lang}/splitter.dat")
    self.mf = freeling.maco(op)
    self.tg = freeling.hmm_tagger("es", f"{data}{lang}/tagger.dat", 1, 2)
    self.sen = freeling.senses(f"{data}{lang}/senses.dat")
    # NOTE(review): this NER instance is bound to a local and then discarded —
    # it was probably meant to be self.ner; kept as-is to preserve behavior.
    ner = freeling.ner(f"{data}{lang}/ner/ner-ab.dat")
    self.parser = freeling.chart_parser(f"{data}{lang}/chunker/grammar-chunk.dat")
    self.dep = freeling.dep_txala(f"{data}{lang}/dep/dependences.dat",
                                  self.parser.get_start_symbol())
def prepare_freeling():
    """Build and return the FreeLing 4.0 analysis chain for Portuguese.

    Requirements (see https://github.com/TALP-UPC/FreeLing, binaries under
    "releases"; this was written against 4.0):
      * the directory containing libfreeling.so (FREELINGDIR/lib) must be
        on LD_LIBRARY_PATH;
      * freeling.py and _freeling.so must sit next to this module.
    Python API example:
    https://github.com/TALP-UPC/FreeLing/tree/master/APIs/python

    Returns a tuple (tokenizer, splitter, session id, maco, tagger, senses,
    chart parser, ukb disambiguator, CoNLL outputter).
    """
    # Change these paths for your installation.
    freeling_dir = "/usr/local"
    data = freeling_dir + "/share/freeling/"
    lang = "pt"
    freeling.util_init_locale("default")

    # Options for the maco analyzer: defaults are fine except the data files.
    opts = freeling.maco_options("pt")
    opts.set_data_files(
        "",
        data + "common/punct.dat",
        data + lang + "/dicc.src",
        data + lang + "/afixos.dat",
        "",
        data + lang + "/locucions.dat",
        data + lang + "/np.dat",
        "",  # there's no "quantities.dat" for pt
        data + lang + "/probabilitats.dat")

    # Analyzers.
    tok = freeling.tokenizer(data + lang + "/tokenizer.dat")
    splitter = freeling.splitter(data + lang + "/splitter.dat")
    session = splitter.open_session()
    morpho = freeling.maco(opts)
    # Select which of the created morphological submodules are used
    # (default would be: all created submodules).
    morpho.set_active_options(False, True, True, True,
                              True, True, False, True,
                              True, True, True, True)

    # Tagger, sense annotator, chart parser and UKB disambiguator.
    tagger = freeling.hmm_tagger(data + lang + "/tagger.dat", True, 2)
    senses = freeling.senses(data + lang + "/senses.dat")
    parser = freeling.chart_parser(data + lang + "/chunker/grammar-chunk.dat")
    ukb = freeling.ukb(data + lang + "/ukb.dat")
    outputter = freeling.output_conll('./output_conll.dat')
    return tok, splitter, session, morpho, tagger, senses, parser, ukb, outputter
def __init__(self, text):
    """Set up the Spanish FreeLing analysis chain for this document.

    Uses the module-level DATA/LANG path constants; all analyzers are kept
    on the instance for later use.
    """
    super().__init__(text)
    freeling.util_init_locale("default")
    # Language identifier.
    self.la = freeling.lang_ident(DATA + "common/lang_ident/ident.dat")

    # Options for the maco analyzer: defaults are fine except the data files.
    opts = freeling.maco_options("es")
    opts.set_data_files(
        "",
        DATA + "common/punct.dat",
        DATA + LANG + "/dicc.src",
        DATA + LANG + "/afixos.dat",
        "",
        DATA + LANG + "/locucions.dat",
        DATA + LANG + "/np.dat",
        DATA + LANG + "/quantities.dat",
        DATA + LANG + "/probabilitats.dat")

    # Analyzers.
    self.tk = freeling.tokenizer(DATA + LANG + "/tokenizer.dat")
    self.sp = freeling.splitter(DATA + LANG + "/splitter.dat")
    self.sid = self.sp.open_session()
    self.mf = freeling.maco(opts)
    # Enable/disable the morphological submodules created above.
    self.mf.set_active_options(
        False,  # umap: user map
        True,   # num:  number detection
        True,   # pun:  punctuation detection
        True,   # dat:  date detection
        True,   # dic:  dictionary search
        True,   # aff:  affixes
        False,  # com:  compounds
        True,   # rtk:  retokenization
        True,   # mw:   multiword recognition
        True,   # ner:  named entity recognition
        True,   # qt:   quantity recognition
        True)   # prb:  probability assignment and guesser

    # Tagger, sense annotator and parsers.
    self.tg = freeling.hmm_tagger(DATA + LANG + "/tagger.dat", True, 2)
    self.sen = freeling.senses(DATA + LANG + "/senses.dat")
    self.parser = freeling.chart_parser(DATA + LANG + "/chunker/grammar-chunk.dat")
    self.dep = freeling.dep_txala(DATA + LANG + "/dep_txala/dependences.dat",
                                  self.parser.get_start_symbol())
def process_list(student_list, prompt):
    """Tag and parse every essay in *student_list* with FreeLing.

    Each entry of *student_list* is a (level, text) pair.  Each result is
    appended as (level, parsed_text) to the module-level list selected by
    *prompt*: "V" -> essays_vacation_parsed, "F" -> essays_famous_parsed.
    Relies on the module-level tk, sp, morfo analyzers and lpath.

    Fix: the tagger/senses/ukb/parser objects are loop-invariant, so they
    are now built once instead of reloading every model file per essay.
    """
    # Build the analysis chain once; it is reused for all essays.
    tagger = freeling.hmm_tagger(lpath + "tagger.dat", True, 2)
    sen = freeling.senses(lpath + "senses.dat")   # sense annotator
    wsd = freeling.ukb(lpath + "ukb.dat")         # sense disambiguator
    parser = freeling.chart_parser(lpath + "/chunker/grammar-chunk.dat")
    dep = freeling.dep_txala(lpath + "/dep_txala/dependences.dat",
                             parser.get_start_symbol())

    for essay in student_list:
        level = essay[0]
        text = essay[1]

        # tokenize -> split into sentences
        lw = tk.tokenize(text)
        ls = sp.split(lw)
        # morphosyntactic analysis and disambiguation
        ls = morfo.analyze(ls)
        ls = tagger.analyze(ls)
        # annotate and disambiguate senses
        ls = sen.analyze(ls)
        ls = wsd.analyze(ls)
        # parse sentences
        ls = parser.analyze(ls)
        ls = dep.analyze(ls)

        # Append (level, parsed essay) to the list matching the prompt.
        essay_parse = ProcessSentences(ls)
        if prompt == "V":
            essays_vacation_parsed.append((level, essay_parse))
        elif prompt == "F":
            essays_famous_parsed.append((level, essay_parse))
def inicializa(self):
    """Create the FreeLing analyzers for self.lang and open the DB connection."""
    freeling_root = "/usr/local"
    data = freeling_root + "/share/freeling/"
    lang = self.lang
    freeling.util_init_locale("default")

    # Language identifier.
    self.la = freeling.lang_ident(data + "common/lang_ident/ident.dat")

    # Options for the maco analyzer.
    op = freeling.maco_options("es")
    op.set_active_modules(0, 1, 1, 1, 1, 1, 1, 1, 1, 1)
    op.set_data_files("",
                      data + lang + "/locucions.dat",
                      data + lang + "/quantities.dat",
                      data + lang + "/afixos.dat",
                      data + lang + "/probabilitats.dat",
                      data + lang + "/dicc.src",
                      data + lang + "/np.dat",
                      data + "common/punct.dat",
                      data + lang + "/corrector/corrector.dat")

    # Analyzers.
    self.tk = freeling.tokenizer(data + lang + "/tokenizer.dat")
    self.sp = freeling.splitter(data + lang + "/splitter.dat")
    self.mf = freeling.maco(op)
    self.tg = freeling.hmm_tagger(data + lang + "/tagger.dat", 1, 2)
    self.sen = freeling.senses(data + lang + "/senses.dat")
    self.nec = freeling.nec(data + lang + "/nerc/nec/nec-ab-rich.dat")
    # self.ner=freeling.nec(DATA+LANG+"/ner/ner-ab.dat");
    self.parser = freeling.chart_parser(data + lang + "/chunker/grammar-chunk.dat")
    self.dep = freeling.dep_txala(data + lang + "/dep/dependences.dat",
                                  self.parser.get_start_symbol())

    # MySQL connection (credentials here are masked placeholders).
    con_data = {'user': '******', 'password': '******', 'host': '127.0.0.1',
                'database': 'agiria', 'raise_on_warnings': True,
                'autocommit': True, 'buffered': True}
    self.con = my.connect(**con_data)
def process_file(essay_lst, x):
    """Tag and parse every (id, essay) pair in *essay_lst* with FreeLing.

    Each result is appended as (id, ProcessSentences(ls)) to the
    module-level list selected by *x*: 2 -> essays_special_tagged,
    3 -> essays_terrible_tagged.  Relies on the module-level tk, sp,
    morfo analyzers and lpath.

    Fixes: the tagger/senses/ukb/parser objects are loop-invariant, so
    they are now built once instead of reloading every model file per
    essay; the local that shadowed the builtin `id` was renamed.
    """
    # Build the analysis chain once; it is reused for all essays.
    tagger = freeling.hmm_tagger(lpath + "tagger.dat", True, 2)
    sen = freeling.senses(lpath + "senses.dat")   # sense annotator
    wsd = freeling.ukb(lpath + "ukb.dat")         # sense disambiguator
    parser = freeling.chart_parser(lpath + "/chunker/grammar-chunk.dat")
    dep = freeling.dep_txala(lpath + "/dep_txala/dependences.dat",
                             parser.get_start_symbol())

    for entry in essay_lst:
        essay_id = entry[0]
        essay = entry[1]

        # tokenize -> split into sentences
        lw = tk.tokenize(essay)
        ls = sp.split(lw)
        # morphosyntactic analysis and disambiguation
        ls = morfo.analyze(ls)
        ls = tagger.analyze(ls)
        # annotate and disambiguate senses
        ls = sen.analyze(ls)
        ls = wsd.analyze(ls)
        # parse sentences
        ls = parser.analyze(ls)
        ls = dep.analyze(ls)

        # Store the processed essay in the list selected by x.
        if x == 2:
            essays_special_tagged.append((essay_id, ProcessSentences(ls)))
        elif x == 3:
            essays_terrible_tagged.append((essay_id, ProcessSentences(ls)))
def inicializa(self):
    """Initialise the FreeLing analyzers for self.lang and the MySQL connection."""
    FREELINGDIR = "/usr/local"
    DATA = FREELINGDIR + "/share/freeling/"
    LANG = self.lang
    freeling.util_init_locale("default")

    # Language identifier.
    self.la = freeling.lang_ident(f"{DATA}common/lang_ident/ident.dat")

    # Options for the maco analyzer.
    op = freeling.maco_options("es")
    op.set_active_modules(0, 1, 1, 1, 1, 1, 1, 1, 1, 1)
    op.set_data_files(
        "",
        f"{DATA}{LANG}/locucions.dat",
        f"{DATA}{LANG}/quantities.dat",
        f"{DATA}{LANG}/afixos.dat",
        f"{DATA}{LANG}/probabilitats.dat",
        f"{DATA}{LANG}/dicc.src",
        f"{DATA}{LANG}/np.dat",
        f"{DATA}common/punct.dat",
        f"{DATA}{LANG}/corrector/corrector.dat")

    # Analyzers.
    self.tk = freeling.tokenizer(f"{DATA}{LANG}/tokenizer.dat")
    self.sp = freeling.splitter(f"{DATA}{LANG}/splitter.dat")
    self.mf = freeling.maco(op)
    self.tg = freeling.hmm_tagger(f"{DATA}{LANG}/tagger.dat", 1, 2)
    self.sen = freeling.senses(f"{DATA}{LANG}/senses.dat")
    self.nec = freeling.nec(f"{DATA}{LANG}/nerc/nec/nec-ab-rich.dat")
    # self.ner=freeling.nec(DATA+LANG+"/ner/ner-ab.dat");
    self.parser = freeling.chart_parser(f"{DATA}{LANG}/chunker/grammar-chunk.dat")
    self.dep = freeling.dep_txala(f"{DATA}{LANG}/dep/dependences.dat",
                                  self.parser.get_start_symbol())

    # MySQL connection (credentials here are masked placeholders).
    con_data = {'user': '******', 'password': '******', 'host': '127.0.0.1',
                'database': 'agiria', 'raise_on_warnings': True,
                'autocommit': True, 'buffered': True}
    self.con = my.connect(**con_data)
def config_files(self, lang, data_dir, data_dir_common):
    """Configure all FreeLing analyzers from the given data directories.

    *data_dir* is extended with the language code; *data_dir_common*
    holds the language-independent files (punctuation).
    """
    data_dir += lang + "/"
    data_conf = data_dir + "nerc/nec/nec.cfg"

    opt = freeling.maco_options(lang)
    # set_data_files(usr, pun, dic, aff, comp, loc, nps, qty, prb)
    opt.set_data_files("",
                       data_dir_common + "punct.dat",
                       data_dir + "dicc.src",
                       data_dir + "afixos.dat",
                       data_dir + "compounds.dat",
                       data_dir + "locucions.dat",
                       data_dir + "np.dat",
                       data_dir + "quantities.dat",
                       data_dir + "probabilitats.dat")
    self.mf = freeling.maco(opt)
    # set_active_options(umap, num, pun, dat, dic, aff, comp, rtk,
    #                    mw, ner, qt, prb): only the user map and the
    # dictionary search are disabled; everything else — including the
    # probability/guesser module — is active.
    self.mf.set_active_options(False, True, True, True,
                               False, True, True, True,
                               True, True, True, True)

    # Remaining analyzers.
    self.tk = freeling.tokenizer(data_dir + "tokenizer.dat")
    self.sp = freeling.splitter(data_dir + "splitter.dat")
    self.tg = freeling.hmm_tagger(data_dir + "tagger.dat", True, 2)
    self.sen = freeling.senses(data_dir + "senses.dat")
    self.parser = freeling.chart_parser(data_dir + "chunker/grammar-chunk.dat")
    self.dep = freeling.dep_txala(data_dir + "/dep_txala/dependences.dat",
                                  self.parser.get_start_symbol())
    self.nec = freeling.nec(data_conf)
# NOTE(review): fragment — the op.set_data_files(...) call begins before
# this excerpt; only its trailing arguments are visible here.
                  DATA + LANG + "/locucions.dat",
                  DATA + LANG + "/np.dat",
                  DATA + LANG + "/quantities.dat",
                  DATA + LANG + "/probabilitats.dat");
# create the analyzers and one shared splitter session
tk=freeling.tokenizer(DATA+LANG+"/tokenizer.dat");
sp=freeling.splitter(DATA+LANG+"/splitter.dat");
sid=sp.open_session();
mf=freeling.maco(op);
# enable only: punctuation, dictionary, affixes, retokenization,
# NER and probability/guesser (umap/num/dat/comp/mw/qt stay off)
mf.set_active_options(False, False, True, False,
                      True, True, False, True,
                      False, True, False, True )
tg=freeling.hmm_tagger(DATA+LANG+"/tagger.dat",True,2)
sen=freeling.senses(DATA+LANG+"/senses.dat")
parser= freeling.chart_parser(DATA+LANG+"/chunker/grammar-chunk.dat")
dep=freeling.dep_txala(DATA+LANG+"/dep_txala/dependences.dat", parser.get_start_symbol())
# run the full chain over each input file, sharing the analyzers
# and the single splitter session
process_file(input_training_file, output_training_file, [sid, tk, sp, mf, tg, sen, parser, dep])
process_file(input_testing_file, output_testing_file, [sid, tk, sp, mf, tg, sen, parser, dep])
process_file(input_pruebas_file, output_pruebas_file, [sid, tk, sp, mf, tg, sen, parser, dep])
# close all input and output streams
input_training_file.close()
input_pruebas_file.close()
input_testing_file.close()
output_pruebas_file.close()
output_testing_file.close()
output_training_file.close()
def __init__(self):
    """Set up the French FreeLing pipeline and register the complexity metrics.

    Enables one config flag per metric: KANDEL MODELS, DALE CHALL and SOL,
    plus the extended MEAN/STD RARE WORDS measures.
    """
    lang = 'fr'
    ComplexityLanguage.__init__(self, lang)
    ## Modify this line to be your FreeLing installation directory
    FREELINGDIR = "/home/garciacumbreras18/dist/freeling"
    DATA = FREELINGDIR + "/data/"
    CLASSDIR = ""
    self.lang = lang
    freeling.util_init_locale("default")

    # Language identifier.
    self.la = freeling.lang_ident(DATA + "common/lang_ident/ident.dat")

    # Options for the maco analyzer: defaults are fine except the data files.
    op = freeling.maco_options(lang)
    op.set_data_files(
        "",
        DATA + "common/punct.dat",
        DATA + lang + "/dicc.src",
        DATA + lang + "/afixos.dat",
        "",
        DATA + lang + "/locucions.dat",
        DATA + lang + "/np.dat",
        DATA + lang + "/quantities.dat",
        DATA + lang + "/probabilitats.dat")

    # Analyzers.
    self.tk = freeling.tokenizer(DATA + lang + "/tokenizer.dat")
    self.sp = freeling.splitter(DATA + lang + "/splitter.dat")
    self.mf = freeling.maco(op)
    # Select which of the created morphological submodules are used
    # (default would be: all created submodules).
    self.mf.set_active_options(
        False, True, True, True,
        True, True, False, True,
        True, True, True, True)

    # Tagger and sense annotator.
    self.tg = freeling.hmm_tagger(DATA + lang + "/tagger.dat", True, 2)
    self.sen = freeling.senses(DATA + lang + "/senses.dat")

    # Word list for the Dale-Chall metric (whitespace-separated words,
    # possibly several per line).
    with open(CLASSDIR + '/home/garciacumbreras18/DaleChall.txt') as handle:
        self.listDaleChall = [word
                              for line in handle.readlines()
                              for word in line.strip().split()]

    # One boolean per metric, enabling/disabling its computation:
    # KANDEL MODELS, DALE CHALL, SOL.
    self.config += [True, True, True]
    self.metricsStr.extend(['KANDEL-MODELS', 'DALE CHALL', 'SOL'])
    self.configExtend += [True, True]
    self.metricsStrExtend.extend(['MEAN RARE WORDS', 'STD RARE WORDS'])
True, True, True, # select which among created True, True, False, True, # submodules are to be used. True, True, True, True) # default: all created submodules are used # create tagger, sense anotator, and parsers tg = freeling.hmm_tagger(DATA + LANG + "/tagger.dat", True, 2) sen = freeling.senses(DATA + LANG + "/senses.dat") parser = freeling.chart_parser(DATA + LANG + "/chunker/grammar-chunk.dat") dep = freeling.dep_txala(DATA + LANG + "/dep_txala/dependences.dat", parser.get_start_symbol()) # process input text lin = sys.stdin.readline() print("Text language is: " + la.identify_language(lin, ["es", "ca", "en", "it"]) + "\n") while (lin): l = tk.tokenize(lin) ls = sp.split(sid, l, False)
def __init__(self, lang='it'):
    """Set up the Italian FreeLing pipeline and register the complexity metrics."""
    ## Modify this line to be your FreeLing installation directory
    FREELINGDIR = "/home/garciacumbreras18/dist/freeling"
    DATA = FREELINGDIR + "/data/"
    self.DATA = DATA
    self.lang = lang
    freeling.util_init_locale("default")

    # Language identifier.
    self.la = freeling.lang_ident(DATA + "common/lang_ident/ident.dat")

    # Options for the maco analyzer: defaults are fine except the data files.
    op = freeling.maco_options(lang)
    op.set_data_files("",
                      self.DATA + "common/punct.dat",
                      self.DATA + self.lang + "/dicc.src",
                      self.DATA + self.lang + "/afixos.dat",
                      "",
                      self.DATA + self.lang + "/locucions.dat",
                      self.DATA + self.lang + "/np.dat",
                      "",
                      self.DATA + self.lang + "/probabilitats.dat")

    # Analyzers.
    self.tk = freeling.tokenizer(self.DATA + self.lang + "/tokenizer.dat")
    self.sp = freeling.splitter(self.DATA + self.lang + "/splitter.dat")
    self.mf = freeling.maco(op)
    # Select which of the created morphological submodules are used
    # (default would be: all created submodules).
    self.mf.set_active_options(
        False, True, True, True,
        True, True, False, True,
        True, True, True, True)

    # Tagger and sense annotator.
    self.tg = freeling.hmm_tagger(self.DATA + self.lang + "/tagger.dat", True, 2)
    self.sen = freeling.senses(DATA + lang + "/senses.dat")

    # One boolean per metric, enabling/disabling its computation; all
    # supported metrics are on: PUNCTUATION MARKS, SCI, ARI, MU,
    # Flesch-Vaca, Gulpease.
    self.config = [True, True, True, True, True, True]
    self.metricsIt = [
        'AVERAGE PUNCTUATION MARKS', 'SCI', 'ARI', 'MU', 'FLESCH-VACA',
        'GULPEASE'
    ]
    self.configExtend = [True, True, True, True, True]
    self.metricsItExtend = [
        'MEAN WORDS', 'STD WORDS', 'COMPLEX SENTENCES', 'MEAN SYLLABLES',
        'STD SYLLABLES'
    ]
def fullParsing(self, text, sentimentText):
    """Dependency-parse *text* and look for the node matching *sentimentText*.

    Both strings are stripped of a leading '@' (Twitter handle marker);
    a '.' is appended to the target so it forms a full sentence.  Returns
    the (type, list) pair produced by self.getTypeNode for the first
    sentence of *text* that yields a non-None type, or (None, None).

    Fix: the original also built a lang_ident, an hmm_tagger and a senses
    annotator that were never used — their (expensive) model loading has
    been removed.  Behavior of the parsing chain is unchanged.
    """
    ## Modify this line to be your FreeLing installation directory
    FREELINGDIR = "/usr/local"
    DATA = FREELINGDIR + "/share/freeling/"
    LANG = "es"
    freeling.util_init_locale("default")

    # Options for the maco analyzer: defaults are fine except the data files.
    op = freeling.maco_options("es")
    op.set_data_files(
        "",
        DATA + "common/punct.dat",
        DATA + LANG + "/dicc.src",
        DATA + LANG + "/afixos.dat",
        "",
        DATA + LANG + "/locucions.dat",
        DATA + LANG + "/np.dat",
        DATA + LANG + "/quantities.dat",
        DATA + LANG + "/probabilitats.dat")

    # Analyzers actually used below.
    tk = freeling.tokenizer(DATA + LANG + "/tokenizer.dat")
    sp = freeling.splitter(DATA + LANG + "/splitter.dat")
    sid = sp.open_session()
    mf = freeling.maco(op)
    # Select which of the created morphological submodules are used.
    mf.set_active_options(
        False, True, True, True,
        True, True, False, True,
        True, True, True, True)
    parser = freeling.chart_parser(DATA + LANG + "/chunker/grammar-chunk.dat")
    dep = freeling.dep_txala(DATA + LANG + "/dep_txala/dependences.dat",
                             parser.get_start_symbol())

    # --- parse the sentiment target ------------------------------------
    sentimentText += '.'
    if sentimentText[0] == '@':
        sentimentText = sentimentText[1:]
    target = tk.tokenize(sentimentText)
    targets = sp.split(sid, target, True)
    targets = mf.analyze(targets)
    targets = parser.analyze(targets)
    targets = dep.analyze(targets)
    # NOTE(review): if the target splits into several sentences only the
    # last targetList survives; if it yields none, targetList is unbound.
    for s in targets:
        targetr = s.get_parse_tree()
        targetList = self.getTreeAsList(targetr, 0)
        # drop the final node — presumably the '.' appended above; confirm
        del targetList[-1]

    # --- parse the input text ------------------------------------------
    lin = text
    if lin[0] == '@':
        lin = lin[1:]
    l = tk.tokenize(lin)
    ls = sp.split(sid, l, True)
    ls = mf.analyze(ls)
    ls = parser.analyze(ls)
    ls = dep.analyze(ls)

    # Keep the first sentence whose parse tree matches the target.
    finalType = None
    finalList = None
    for s in ls:
        tr = s.get_parse_tree()
        #self.printTree(tr, 0);
        wordType, wordList = self.getTypeNode(tr, 0, targetList)
        if finalType is None:
            if wordType is not None:
                finalType = wordType
                finalList = wordList

    # clean up
    sp.close_session(sid)
    return finalType, finalList
def __init__(self, text):
    """Tag *text* with FreeLing and keep only the content words.

    Fills self._cleaned_text with "word-TAG" strings, dropping tokens by
    EAGLES tag prefix: adpositions (S), interjections (I), conjunctions
    (C), punctuation (F), determiners (D) and pronouns (P).

    Fixes: the splitter session is now always closed (it leaked before)
    and the unused lang_ident analyzer is no longer loaded.
    """
    super().__init__(text)
    self.stop_words = set(stopwords.words('spanish') + list(punctuation))
    self._cleaned_text = list()
    freeling.util_init_locale("default")

    # Options for the maco analyzer: defaults are fine except the data files.
    op = freeling.maco_options("es")
    op.set_data_files(
        "",
        DATA + "common/punct.dat",
        DATA + LANG + "/dicc.src",
        DATA + LANG + "/afixos.dat",
        "",
        DATA + LANG + "/locucions.dat",
        DATA + LANG + "/np.dat",
        DATA + LANG + "/quantities.dat",
        DATA + LANG + "/probabilitats.dat")

    # Analyzers.
    tk = freeling.tokenizer(DATA + LANG + "/tokenizer.dat")
    sp = freeling.splitter(DATA + LANG + "/splitter.dat")
    sid = sp.open_session()
    mf = freeling.maco(op)
    # All morphological submodules active.
    mf.set_active_options(
        True, True, True, True,
        True, True, True, True,
        True, True, True, True)
    tg = freeling.hmm_tagger(DATA + LANG + "/tagger.dat", True, 2)
    sen = freeling.senses(DATA + LANG + "/senses.dat")
    parser = freeling.chart_parser(DATA + LANG + "/chunker/grammar-chunk.dat")

    try:
        l = tk.tokenize(self.text)
        ls = sp.split(sid, l, False)
        ls = mf.analyze(ls)
        ls = tg.analyze(ls)
        ls = sen.analyze(ls)
        ls = parser.analyze(ls)

        for s in ls:
            for w in s.get_words():
                tag = w.get_tag()
                word = w.get_form()
                # Skip prepositions, interjections, conjunctions,
                # punctuation, determiners and pronouns.
                if not tag.startswith(("S", "I", "C", "F", "D", "P")):
                    self._cleaned_text.append("{}-{}".format(word, tag))
    finally:
        sp.close_session(sid)  # fix: session was never closed before
def tag(self):
    """Run the full FreeLing pipeline over an XML corpus of phrases.

    Reads its settings from the GUI widgets (style filter, phrases per
    TSV file, completeness filter, WebAnno export flag), analyses every
    selected <phrase> of every <document> (tokenise, PoS-tag,
    sense-disambiguate, dependency-parse) and writes the annotated corpus
    to output/<name>.xml plus, optionally, a series of WebAnno-style TSV
    files.  Returns None; all results go to files and GUI status fields.

    Fix: the second phrase-filter branch repeated the condition
    `styles == 'all' and only_completes == True` and was therefore dead;
    it now handles "all styles, no completeness filter" as intended.
    """
    # ----- read and validate the GUI configuration ----------------------
    try:
        styles = self._styles.get()
        ppf = self._ppf.get()  # phrases per TSV file
        if self._only_completes.get() == 1:
            only_completes = True
        else:
            only_completes = False
        if self._webanno.get() == 1:
            webanno = True
        else:
            webanno = False
    except:
        # Any widget error is reported to the user as invalid input.
        messagebox.showerror(
            title="Ungültige Eingabe",
            message=
            """Bitte überprüfe, dass es sich bei deiner Eingabe in "Anzahl Sätze pro Datei" um eine ganzzahlige Zahl handelt."""
        )
        return None

    self._info.set("Starting...")
    self.root.update()

    # headers for the tsv (WebAnno export only)
    if webanno:
        metadata_header = "webanno.custom.Metadata | Metadatavalue"
        lemma_header = "de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Lemma | value"
        pos_header = "de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS | PosValue"
        new_pos_header = "webanno.custom.NewPOS | SavePOSValue"
        morpho_header = "webanno.custom.Morpho | MorphoValue"
        comment_header = "webanno.custom.Comments | Commentvalue"
        dep_header = "de.tudarmstadt.ukp.dkpro.core.api.syntax.type.dependency.Dependency | DependencyType | AttachTo=de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS"
        hashtag = " # "

    # ----- build the FreeLing analysis chain ----------------------------
    # this needs to point to the freeling install directory
    FREELINGDIR = "/usr/local"
    DATA = FREELINGDIR + "/share/freeling/"
    LANG = "es"
    PATH = DATA + LANG + "/"
    freeling.util_init_locale("default")

    # create tokenizer and splitter; a splitter is necessary for the
    # process, but our data is already split — no_splitter.dat tells the
    # splitter to never split
    tk = freeling.tokenizer(PATH + "tokenizer.dat")
    sp = freeling.splitter("RoSeData/no_splitter.dat")
    sid = sp.open_session()

    # create options set for maco analyzer; defaults are fine except for
    # the data files
    op = freeling.maco_options("es")
    op.UserMapFile = ""
    op.LocutionsFile = PATH + "locucions.dat"
    op.AffixFile = PATH + "afixos.dat"
    op.ProbabilityFile = PATH + "probabilitats.dat"
    op.DictionaryFile = PATH + "dicc.src"
    op.NPdataFile = PATH + "np.dat"
    op.PunctuationFile = PATH + "../common/punct.dat"
    mf = freeling.maco(op)
    # select which of the created morphological submodules are used
    mf.set_active_options(
        False, True, True, True,
        True, True, False, True,
        True, True, False, True)

    # create tagger
    self._info.set("Generiere Tagger...")
    self.root.update()
    tg = freeling.hmm_tagger(PATH + "tagger.dat", True, 2)

    # create sense annotator and disambiguator
    self._info.set("Generiere sense disambiguator...")
    self.root.update()
    sen = freeling.senses(PATH + "senses.dat")
    wsd = freeling.ukb(PATH + "ukb.dat")

    # create parser
    self._info.set("Generiere dependency parser...")
    self.root.update()
    parser = freeling.dep_treeler(PATH + "dep_treeler/dependences.dat")

    sent_counter = 0          # sentences analysed so far
    doc_counter = 0           # TSV files created so far
    webanno_sent_counter = 0  # sentences written to the current TSV
    outputter = freeling.output_conll()

    # ----- output file and headers --------------------------------------
    outf = open("output/" + self._outF.get() + ".xml",
                encoding='utf-8', mode='w')
    outf.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
    outf.write("<corpus>\n")

    try:
        iterate_docs = ET.iterparse(self._indir.get(), events=("end", ),
                                    tag="document")
    except:
        messagebox.showerror(
            title="Ungültiger Dateipfad",
            message=
            "Unter dem angegebenen Dateipfad konnte keine XMl-Datei gefunden werden."
        )
        self._info.set("Process stopped.")
        self.root.update()
        return None

    # ----- main tagging loop --------------------------------------------
    for action, doc in iterate_docs:  # iterate all fileElems
        if True:  # filter in case you only want certain docs
            self._info.set("Dokument {} wird bearbeitet...".format(
                doc.attrib["file"]))
            self.root.update()

            # filter out all unwanted phrases
            if styles == 'all' and only_completes == True:
                phrases = doc.xpath('phrase[contains(@complete,"yes")]')
            elif styles == 'all':
                # fix: this branch previously repeated the condition above
                # and could never run
                phrases = doc.xpath('phrase')
            elif styles != 'all' and only_completes == True:
                phrases = doc.xpath(
                    'phrase[contains(@complete,"yes") and contains(@style,"'
                    + styles + '")]')
            else:
                phrases = doc.xpath('phrase[contains(@style,"' + styles +
                                    '")]')

            for phrase in phrases:
                phrasetext = phrase.text
                # run the whole FreeLing chain over this phrase
                lw = tk.tokenize(phrasetext)
                ls = sp.split(sid, lw, True)
                ls = mf.analyze(ls)
                ls = tg.analyze(ls)
                ls = sen.analyze(ls)
                wsdis = wsd.analyze(ls)
                dep = parser.analyze(wsdis)

                if webanno:
                    # open a new tsv file if number of phrases is reached
                    if sent_counter % ppf == 0:
                        if doc_counter != 0:
                            conllout.close()
                        doc_counter += 1
                        conllout = open(self._outF.get() + '-' +
                                        str(doc_counter) + '.tsv',
                                        encoding='utf-8', mode='w')
                        tsvwriter = csv.writer(conllout, delimiter='\t')
                        # implement headers
                        tsvwriter.writerow([
                            hashtag + metadata_header + hashtag +
                            lemma_header + hashtag + pos_header + hashtag +
                            new_pos_header + hashtag + morpho_header +
                            hashtag + comment_header + hashtag + dep_header
                        ])
                        webanno_sent_counter = 0
                    if webanno_sent_counter != 0:
                        tsvwriter.writerow([])
                    tsvwriter.writerow(["#id=" + str(webanno_sent_counter)])

                word_counter = 1
                sent_counter += 1
                self._info2.set(
                    str(sent_counter) + " Sätze wurden analysiert!")
                self.root.update()

                # a clunky way to get the treedata
                conllstr = outputter.PrintResults(dep)
                tokens_in_sent = conllstr.splitlines()
                depdict = {}
                for token in tokens_in_sent:
                    if len(token) > 1:
                        elements = token.split()
                        depdict[elements[0]] = [
                            elements[1], elements[9], elements[10]
                        ]

                for sentence in ls:
                    sent_all_info = []  # only needed for the AfterFilter
                    for word in sentence.get_words():
                        dictentry = depdict[str(word_counter)]
                        if dictentry[0] != word.get_form():
                            print(
                                "An error occured! Please check this phrase:",
                                phrasetext)
                        if dictentry[1] == "0":
                            # root nodes point to themselves
                            dictentry[1] = str(word_counter)
                        # we give the metadata to the phrase by storing it
                        # as a layer in the first token
                        if word_counter == 1:
                            doc = phrase.getparent()
                            docname = doc.attrib["file"]
                            webanno_metadata = os.path.basename(
                                self._indir.get(
                                )) + ", " + docname + ", " + phrase.attrib["id"]
                        else:
                            webanno_metadata = "_"
                        tokenElem = ET.SubElement(phrase, 'token',
                                                  id=str(word_counter),
                                                  lemma=word.get_lemma(),
                                                  pos=word.get_tag(),
                                                  dep_tag=dictentry[2],
                                                  dep_parent=dictentry[1])
                        tokenElem.text = word.get_form()
                        if webanno:
                            # save all info as a tuple similar to
                            # webanno/conll-Format
                            all_info = (word.get_form(), webanno_metadata,
                                        word.get_lemma(), word.get_tag(),
                                        dictentry[2], dictentry[1])
                            sent_all_info.append(all_info)
                        word_counter += 1

                    if webanno:
                        # filter the phrases
                        allowed = self._AfterFilter(sent_all_info)
                        if allowed:
                            webanno_sent_counter += 1
                            this_word_counter = 1
                            # finally write the phrases to the tsv
                            for element in sent_all_info:
                                tsvwriter.writerow([
                                    str(webanno_sent_counter) + "-" +
                                    str(this_word_counter), element[0],
                                    element[1], element[2], element[3],
                                    "_", "_", "O", element[4],
                                    str(webanno_sent_counter) + "-" +
                                    element[5]
                                ])
                                this_word_counter += 1

            # write docElem
            docString = ET.tostring(doc, encoding='unicode',
                                    pretty_print=True)
            outf.write(docString)
            doc.clear()
            # Also eliminate now-empty references from the root node to elem
            for ancestor in doc.xpath('ancestor-or-self::*'):
                while ancestor.getprevious() is not None:
                    del ancestor.getparent()[0]
            doc.getparent().remove(doc)

    outf.write("</corpus>")
    outf.close()
    del iterate_docs
    # NOTE(review): if webanno is on but no phrase was ever processed,
    # conllout is unbound here — pre-existing edge case, kept as-is.
    if webanno:
        conllout.close()
    sp.close_session(sid)
    self._info.set("Tagging erfolgreich beendet.")
    self.root.update()