class PLPTestCase(unittest.TestCase):
    """Unit tests for the PLP wrapper around the native CLP library.

    The numeric constants (e.g. 18660912) are CLP lexeme identifiers for
    specific Polish words; they are fixed by the library version loaded
    in setUp.
    """

    def setUp(self):
        # Fresh wrapper per test, bound to the shared CLP library.
        self.analyzer = PLP('/usr/local/clp/lib/libclp_2.6.so')

    def test_ver(self):
        # Library version string is reported as a unicode object.
        self.assertIsInstance(self.analyzer.ver(), unicode)

    def test_rec(self):
        # Exact-form recognition of an inflected word.
        self.assertEqual(self.analyzer.rec(u'żółwiem'), [18660912])

    def test_orec(self):
        # Without diacritics, strict recognition fails but orec() succeeds.
        self.assertEqual(self.analyzer.rec(u'zolwiem'), [])
        self.assertEqual(self.analyzer.orec(u'zolwiem'), [18660912])

    def test_bform(self):
        # Lexeme id -> basic (dictionary) form.
        self.assertEqual(self.analyzer.bform(18660912), u'żółw')

    def test_label(self):
        # Part-of-speech is the first character of the label.
        self.assertEqual(self.analyzer.label(18660912)[0],
                         PLP.CZESCI_MOWY.RZECZOWNIK)
        verb_id = self.analyzer.rec(u'idę')[0]
        self.assertEqual(self.analyzer.label(verb_id)[0],
                         PLP.CZESCI_MOWY.CZASOWNIK)

    def test_ogonkify(self):
        # All diacritic variants of an ASCII-stripped word, in any order.
        variants = [u'gzó', u'gżo', u'gźo', u'gźó', u'gżó']
        self.assertItemsEqual(self.analyzer.ogonkify(u'gzo'), variants)

    def test_forms(self):
        # Full inflection paradigm of "pogoda".
        expected = [
            u'pogoda', u'pogody', u'pogodzie', u'pogodę', u'pogodą',
            u'pogodo', u'pogód', u'pogodom', u'pogodami', u'pogodach',
        ]
        self.assertEqual(self.analyzer.forms(17786048), expected)

    def test_vec(self):
        # Inflection vector: first entry for this id/form pair is 5.
        self.assertEqual(self.analyzer.vec(18660912, u'żółwiem')[0], 5)
# NOTE(review): fragment of a larger script — the matching `if` branch and the
# definitions of p (PLP analyzer), pre_process, strip_sie, stop_list,
# words_freq and total_no live outside this chunk. Indentation below is
# reconstructed from the flattened source; confirm against the original file.
else:
    # Read the whole PAP corpus as one line; notes are separated by '#'.
    with open('data/pap.txt', 'r') as f:
        pap = f.read().replace('\n', ' ').split('#')
    notes = {}
    for note in pap:
        # Every usable note carries a six-digit identifier; skip the rest.
        if not re.search('\d{6}', note):
            continue
        note_id = re.findall('\d{6}', note)[0]
        # Strip the identifier from the note body before tokenizing.
        note_content = re.sub(note_id, '', note).strip()
        words = pre_process(note_content).split(' ')
        for i, word in enumerate(words):
            # Skip words the morphological analyzer does not recognize.
            if not p.rec(word):
                continue
            basic_form = p.bform(p.rec(word)[0])
            if basic_form in stop_list:
                continue
            # Word frequencies and total word count (accumulated globally).
            word = strip_sie(basic_form)
            if word in words_freq:
                words_freq[word] += 1
            else:
                words_freq[word] = 1
            total_no += 1
            # Co-occurrence frequencies — loop body continues past this chunk.
#!/usr/bin/env python # encoding: utf-8 from plp import PLP p = PLP() VERB = PLP.CZESCI_MOWY.CZASOWNIK stimulus = u'fajka' st_forms = set(p.forms(p.rec(u'fajka')[0])) print st_forms snippets_count = 0 def parse_file(filename): global snippets_count with open(filename, 'r') as f: all_words = [] for line in f: words = line.strip().split() all_words.extend(words) stimulus_seen = False last_verb = None second_to_last_verb = None last_verb_index = 0 for i, word in enumerate(all_words): word_utf8 = word.decode('utf-8') if word_utf8 in st_forms or word_utf8[:-1] in st_forms: #print 'stimulus_seen'
class Test:
    """Class responsible for running test against cities retrieved by TestPreparer.

    For every serialized trie dump, each city name in genitive form is run
    through the Stemmer and the result is compared with the expected
    nominative form; failures and a summary are written to per-trie CSV files.
    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    verify loop boundaries against the original file.
    """

    # Serialized trie dumps to benchmark; one pair of result files per entry.
    trie_files = ['trie.bak', 'trie_only_nouns.bak', 'trie_nouns_and_adjectives.bak',
                  'trie_nouns_and_numerals.bak', 'trie_nouns_adjectives_and_numerals.bak']
    # trie_files = ['trie.bak']

    def __init__(self):
        # PLP wraps the native morphological analyzer; _init loads its backend.
        self.plp = PLP()
        self.plp._init()
        print 'Initialized plp'
        self.cities = TestPreparer().start()
        # print 'Loaded cities: ', self.cities.__len__()

    def test(self):
        """Benchmark every trie file and write per-trie CSV result files."""
        print 'Starting analysis'
        for trie_name in self.trie_files:
            print 'Starting', trie_name
            correct_number = 0
            all_number = 0
            s = Stemmer(self.plp, filename=trie_name, word_type=None)
            # One file for successes, one for the full run (header: genitive;
            # nominative; stemmer output).
            corrects_file = codecs.open('../wyniki/single_name/wies_miasto_kolonia_osada/success_' + trie_name.replace('bak', 'txt'), 'w', 'utf-8')
            result_file = codecs.open('../wyniki/single_name/wies_miasto_kolonia_osada/' + trie_name.replace('bak', 'txt'), 'w', 'utf-8')
            result_file.write(u'Dopełniacz;Mianownik;Wynik Stemmera\n')
            corrects_file.write(u'Dopełniacz;Mianownik;Wynik Stemmera\n')
            # for k, v in self.cities.iteritems():
            # Input rows are "<genitive>;<nominative>": v = genitive input,
            # k = expected nominative (matches the headers written above).
            cities = codecs.open('../data/cities_wies_miasto_kolonia_osada.csv', 'r', 'utf-8')
            for city in cities:
                k = city.split(';')[1].strip()
                v = city.split(';')[0].strip()
                all_number += 1
                basic_form = ''
                # word_labels = []
                # if k.__contains__('-'):
                #     for city_parts in v.split('-'):
                #         b = s.find_basic_form(city_parts)
                #         basic_form += b.basic_form + '-'
                #         word_labels.append(b.word_labels)
                #     basic_form = basic_form[0:basic_form.__len__() - 1]
                # else:
                #     for city_parts in v.split(' '):
                #         b = s.find_basic_form(city_parts)
                #         basic_form += b.basic_form + ' '
                #         word_labels.append(b.word_labels)
                basic_form = s.find_basic_form(v).basic_form.strip()
                if basic_form != k:
                    # if basic_form == k:
                    # Mismatch: record the failing triple in the result file.
                    result_file.write(v + ';' + k + ';' + basic_form + ';')
                    # for w_label in word_labels:
                    #     result_file.write(self.find_most_label(w_label) + ' ')
                    result_file.write('\n')
                else:
                    # corrects_file.write(v + ';' + k + ';' + basic_form + ';')
                    # for label in s.find_labels(word_labels):
                    #     corrects_file.write(label + ' ')
                    # corrects_file.write('\n')
                    correct_number += 1
            # Summary row: total; incorrectly recognized; correctly recognized.
            result_file.write(u'Liczba miejscowości;Liczba niepoprawnie rozpoznanych;Liczba poprawnie rozpoznanych\n')
            result_file.write(
                str(all_number) + ';' + str(all_number - correct_number) + ';' + str(correct_number))
            print 'Done', trie_name

    def find_most_label(self, w_label):
        """Return the most frequent morphological label among the words in w_label."""
        max_labels = dict()
        for word in w_label:
            # Count every label of every lexeme the analyzer recognizes.
            for id in self.plp.rec(word):
                label = self.plp.label(id)
                if label in max_labels:
                    max_labels[label] += 1
                else:
                    max_labels[label] = 1
        # NOTE(review): raises ValueError when no word was recognized at all.
        return max(max_labels.iteritems(), key=operator.itemgetter(1))[0]

    def prepare_cities(self):
        """Dump the prepared (genitive;nominative) city pairs to ../data/cities.csv."""
        print 'Preparing cities'
        res_file = codecs.open('../data/cities.csv', 'w', 'utf-8')
        res_file.write(u'Dopełniacz;Mianownik\n')
        for k, v in self.cities.iteritems():
            res_file.write(v + ';' + k + '\n')