def load_modules():
    global FREELING_MODULES
    freeling.util_init_locale("default")
    op = freeling.maco_options(FREELING_LANG)
    op.set_data_files("",
                      FREELING_DATA_DIR + "common/punct.dat",
                      FREELING_DATA_DIR + FREELING_LANG + "/dicc.src",
                      FREELING_DATA_DIR + FREELING_LANG + "/afixos.dat",
                      "",
                      FREELING_DATA_DIR + FREELING_LANG + "/locucions.dat",
                      FREELING_DATA_DIR + FREELING_LANG + "/np.dat",
                      FREELING_DATA_DIR + FREELING_LANG + "/quantities.dat",
                      FREELING_DATA_DIR + FREELING_LANG + "/probabilitats.dat")
    FREELING_MODULES["tk"] = freeling.tokenizer(FREELING_DATA_DIR + FREELING_LANG + "/tokenizer.dat")
    FREELING_MODULES["sp"] = freeling.splitter(FREELING_DATA_DIR + FREELING_LANG + "/splitter.dat")
    FREELING_MODULES["sid"] = FREELING_MODULES["sp"].open_session()
    FREELING_MODULES["mf"] = freeling.maco(op)
    FREELING_MODULES["mf"].set_active_options(False, True, True, True,
                               True, True, False, True,
                               True, True, True, True)
    FREELING_MODULES["tg"] = freeling.hmm_tagger(FREELING_DATA_DIR + FREELING_LANG + "/tagger.dat", True, 2)

    if os.path.isfile(FREELING_DATA_DIR + FREELING_LANG + "/chunker/grammar-chunk.dat"):
        FREELING_MODULES["parser"] = freeling.chart_parser(FREELING_DATA_DIR + FREELING_LANG
                                                           + "/chunker/grammar-chunk.dat")
Example #2
def process_file(essay_lst, x):
    index = 1
    for entry in essay_lst:
        essay_id = entry[0]
        essay = entry[1]

        # create tagger (re-created for each essay; could be hoisted out of the loop)
        tagger = freeling.hmm_tagger(lpath + "tagger.dat", True, 2)

        # create sense annotator
        sen = freeling.senses(lpath + "senses.dat")

        # create sense disambiguator
        wsd = freeling.ukb(lpath + "ukb.dat")

        # create chart parser and dependency parser
        parser = freeling.chart_parser(lpath + "/chunker/grammar-chunk.dat")
        dep = freeling.dep_txala(lpath + "/dep_txala/dependences.dat",
                                 parser.get_start_symbol())

        # tokenize input line into a list of words
        lw = tk.tokenize(essay)
        # split list of words in sentences, return list of sentences
        ls = sp.split(lw)

        # perform morphosyntactic analysis and disambiguation
        ls = morfo.analyze(ls)
        ls = tagger.analyze(ls)

        # annotate and disambiguate senses
        ls = sen.analyze(ls)
        ls = wsd.analyze(ls)
        # parse sentences
        ls = parser.analyze(ls)
        ls = dep.analyze(ls)

        # do whatever is needed with processed sentences
        if x == 2:
            essays_vacation_tagged.append((essay_id, ProcessSentences(ls)))
        elif x == 3:
            essays_famous_tagged.append((essay_id, ProcessSentences(ls)))
        print(index)
        index += 1
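
# Illustrative calls only (the input list names and the values of x are assumptions, not
# part of the original code): x routes the tagged essays to one of the two global result
# lists, and each entry in essay_lst is an (essay_id, essay_text) pair, as unpacked above.
#   process_file(vacation_essays, 2)   # results appended to essays_vacation_tagged
#   process_file(famous_essays, 3)     # results appended to essays_famous_tagged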
Example #3
    True, True, True, True,   # select which among created
    True, True, True, False,  # submodules are to be used.
    False, True, True)        # default: all created submodules are used

# create tagger, sense annotator, and parsers
tg = pyfreeling.hmm_tagger(DATA + LANG + "/tagger.dat", True, 2)
sen = pyfreeling.senses(DATA + LANG + "/senses.dat")
parser = pyfreeling.chart_parser(DATA + LANG + "/chunker/grammar-chunk.dat")
dep = pyfreeling.dep_txala(DATA + LANG + "/dep_txala/dependences.dat",
                           parser.get_start_symbol())


def distribution(hospital_files_names):
    dist_hospitals = {}
    for name, files in hospital_files_names.items():
        if name != 'all':
            dist_hospitals[name] = len(files)

    return dist_hospitals
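
# Illustrative example (the mapping below is invented for demonstration): distribution()
# counts how many files each hospital contributes, skipping the aggregate 'all' entry.
#   hospital_files_names = {'all': ['a1.xml', 'a2.xml', 'b1.xml'],
#                           'hospital_a': ['a1.xml', 'a2.xml'],
#                           'hospital_b': ['b1.xml']}
#   distribution(hospital_files_names)   # -> {'hospital_a': 2, 'hospital_b': 1}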


def file_size(hospitals_files_name, dir_annotated_corpora):
    file_size_hospitals = {}
Example #4
# create analyzers
tk = pyfreeling.tokenizer(DATA + LANG + "/tokenizer.dat")
sp = pyfreeling.splitter(DATA + LANG + "/splitter.dat")
sid = sp.open_session()
mf = pyfreeling.maco(op)

# activate morphological modules to be used in next call
mf.set_active_options(False, True, True, True,   # select which among created
                      True, True, False, True,   # submodules are to be used.
                      True, True, True, True)    # default: all created submodules are used

# create tagger, sense annotator, and parsers
tg = pyfreeling.hmm_tagger(DATA + LANG + "/tagger.dat", True, 2)
sen = pyfreeling.senses(DATA + LANG + "/senses.dat")
parser = pyfreeling.chart_parser(DATA + LANG + "/chunker/grammar-chunk.dat")
dep = pyfreeling.dep_txala(DATA + LANG + "/dep_txala/dependences.dat",
                           parser.get_start_symbol())

# process input text
lin = sys.stdin.readline()

print("Text language is: " + la.identify_language(lin) + "\n")

while lin:

    l = tk.tokenize(lin)
    ls = sp.split(sid, l, False)

    ls = mf.analyze(ls)
    ls = tg.analyze(ls)
    ls = sen.analyze(ls)
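
# The loop body is truncated here; in FreeLing's standard sample the remaining steps
# usually look like the sketch below (an assumption, not the original code):
#     ls = parser.analyze(ls)
#     ls = dep.analyze(ls)
#     ProcessSentences(ls)            # print or collect the analyzed sentences
#     lin = sys.stdin.readline()      # read the next line so the while loop can terminate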