Code Example #1
    def __init__(self):

        freeling.util_init_locale("default")
        self.lang = "en"
        self.ipath = "/usr/local"
        self.lpath = self.ipath + "/share/freeling/" + self.lang + "/"
        self.tk = freeling.tokenizer(self.lpath + "tokenizer.dat")
        self.sp = freeling.splitter(self.lpath + "splitter.dat")

        # create the analyzer with the required set of maco_options
        self.morfo = freeling.maco(self.my_maco_options(self.lang, self.lpath))
        # then, (de)activate required modules
        self.morfo.set_active_options(False,  # UserMap
                                      False,  # NumbersDetection
                                      True,   # PunctuationDetection
                                      False,  # DatesDetection
                                      True,   # DictionarySearch
                                      True,   # AffixAnalysis
                                      False,  # CompoundAnalysis
                                      True,   # RetokContractions
                                      False,  # MultiwordsDetection
                                      True,   # NERecognition
                                      False,  # QuantitiesDetection
                                      True)   # ProbabilityAssignment
        # create tagger
        self.tagger = freeling.hmm_tagger(self.lpath + "tagger.dat", True, 2)

        # create sense annotator
        self.sen = freeling.senses(self.lpath + "senses.dat")
        # create sense disambiguator
        self.wsd = freeling.ukb(self.lpath + "ukb.dat")
        # create dependency parser
        self.parser = freeling.dep_treeler(self.lpath + "dep_treeler/dependences.dat")
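
    # A minimal sketch of a driver method for the pipeline built above.
    # The name analyze_text and the return value are assumptions; each
    # analyze() call mirrors the usage shown in the other examples here.
    def analyze_text(self, text):
        lw = self.tk.tokenize(text)      # raw text -> list of words
        ls = self.sp.split(lw)           # words -> list of sentences
        ls = self.morfo.analyze(ls)      # morphological analysis
        ls = self.tagger.analyze(ls)     # PoS tagging
        ls = self.sen.analyze(ls)        # sense annotation
        ls = self.wsd.analyze(ls)        # sense disambiguation
        ls = self.parser.analyze(ls)     # dependency parsing
        return ls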
Code Example #2
def process_file(essay_lst, x):
    index = 1
    for entry in essay_lst:
        id = entry[0]
        essay = entry[1]

        # create tagger (note: the FreeLing modules below are rebuilt on
        # every iteration; hoisting them out of the loop would be cheaper)
        tagger = freeling.hmm_tagger(lpath + "tagger.dat", True, 2)

        # create sense annotator
        sen = freeling.senses(lpath + "senses.dat")

        # create sense disambiguator
        wsd = freeling.ukb(lpath + "ukb.dat")

        # create chart parser and txala dependency parser
        parser = freeling.chart_parser(lpath + "/chunker/grammar-chunk.dat")
        dep = freeling.dep_txala(lpath + "/dep_txala/dependences.dat",
                                 parser.get_start_symbol())

        # tokenize the essay into a list of words
        lw = tk.tokenize(essay)
        # split the list of words into sentences
        ls = sp.split(lw)

        # perform morphosyntactic analysis and disambiguation
        ls = morfo.analyze(ls)
        ls = tagger.analyze(ls)

        # annotate and disambiguate senses
        ls = sen.analyze(ls)
        ls = wsd.analyze(ls)
        # parse sentences
        ls = parser.analyze(ls)
        ls = dep.analyze(ls)

        # store the processed sentences in the result list selected by x
        if x == 2:
            essays_vacation_tagged.append((id, ProcessSentences(ls)))
        elif x == 3:
            essays_famous_tagged.append((id, ProcessSentences(ls)))
        print(index)
        index += 1
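
# ProcessSentences is referenced above but not included in the excerpt.
# A plausible sketch, assuming it flattens each sentence into
# (form, lemma, tag) triples via FreeLing's standard word accessors:
def ProcessSentences(ls):
    tagged = []
    for s in ls:
        for w in s.get_words():
            tagged.append((w.get_form(), w.get_lemma(), w.get_tag()))
    return tagged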
Code Example #3
    def __init__(self):

        freeling.util_init_locale("default")
        self.lang = "en"
        self.ipath = "/usr/local"
        self.lpath = self.ipath + "/share/freeling/" + self.lang + "/"
        self.tk = freeling.tokenizer(self.lpath + "tokenizer.dat")
        self.sp = freeling.splitter(self.lpath + "splitter.dat")

        # create the analyzer with the required set of maco_options
        self.morfo = freeling.maco(self.my_maco_options(self.lang, self.lpath))
        #  then, (de)activate required modules
        self.morfo.set_active_options(
            False,  # UserMap
            False,  # NumbersDetection
            True,   # PunctuationDetection
            False,  # DatesDetection
            True,   # DictionarySearch
            True,   # AffixAnalysis
            False,  # CompoundAnalysis
            True,   # RetokContractions
            False,  # MultiwordsDetection
            True,   # NERecognition
            False,  # QuantitiesDetection
            True)   # ProbabilityAssignment
        # create tagger
        self.tagger = freeling.hmm_tagger(self.lpath + "tagger.dat", True, 2)

        # create sense annotator
        self.sen = freeling.senses(self.lpath + "senses.dat")
        # create sense disambiguator
        self.wsd = freeling.ukb(self.lpath + "ukb.dat")
        # create dependency parser
        self.parser = freeling.dep_treeler(self.lpath +
                                           "dep_treeler/dependences.dat")
Code Example #4
    True,   # AffixAnalysis
    False,  # CompoundAnalysis
    True,   # RetokContractions
    True,   # MultiwordsDetection
    True,   # NERecognition
    False,  # QuantitiesDetection
    True)   # ProbabilityAssignment

# create tagger
tagger = pyfreeling.hmm_tagger(lpath + "tagger.dat", True, 2)

# create sense annotator
sen = pyfreeling.senses(lpath + "senses.dat")
# create sense disambiguator
wsd = pyfreeling.ukb(lpath + "ukb.dat")
# create dependency parser
parser = pyfreeling.dep_treeler(lpath + "treeler/dependences.dat")

# process input text
text = "".join(sys.stdin.readlines())

# tokenize the input text into a list of words
lw = tk.tokenize(text)
# split the list of words into sentences
ls = sp.split(lw)

# perform morphosyntactic analysis and disambiguation
ls = morfo.analyze(ls)
ls = tagger.analyze(ls)
# annotate and disambiguate senses
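# The excerpt is cut off here. Judging from the parallel examples above,
# the truncated remainder plausibly continues:
ls = sen.analyze(ls)       # annotate senses
ls = wsd.analyze(ls)       # disambiguate senses
ls = parser.analyze(ls)    # dependency parsing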
Code Example #5
    True,   # PunctuationDetection
    True,   # DatesDetection
    True,   # DictionarySearch
    True,   # AffixAnalysis
    False,  # CompoundAnalysis
    True,   # RetokContractions
    True,   # MultiwordsDetection
    True,   # NERecognition
    True,   # QuantitiesDetection
    True)   # ProbabilityAssignment

# create tagger, sense annotator, sense disambiguator, and dependency parser
tg = pyfreeling.hmm_tagger(DATA + LANG + "/tagger.dat", True, 2)
sen = pyfreeling.senses(DATA + LANG + "/senses.dat")
wsd = pyfreeling.ukb(DATA + LANG + "/ukb.dat")
parser = pyfreeling.dep_lstm(DATA + LANG + "/dep_lstm/params-es.dat")

for filepath in os.listdir(ruta_archivos):
    file = os.path.join(ruta_archivos, filepath)

    # run the external argOE script on the file and capture its output
    process = Popen([argOE_script, 'rel', 'es', file], stdout=PIPE)
    (output, err) = process.communicate()
    exit_code = process.wait()
    print(output.decode('utf-8'))

    with open(file, 'r') as fh:
        content = fh.read()

    # tokenize, split, and morphologically analyze the file contents
    l = tk.tokenize(content)
    ls = sp.split(l)
    ls = mf.analyze(ls)
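
    # The excerpt ends mid-pipeline. Judging from the modules created above,
    # a plausible (assumed) continuation of the loop body would be:
    ls = tg.analyze(ls)        # PoS tagging
    ls = sen.analyze(ls)       # sense annotation
    ls = wsd.analyze(ls)       # sense disambiguation
    ls = parser.analyze(ls)    # dependency parsing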