import json
import logging
import os
import xml.etree.ElementTree as ET

import regex

# Assumed to be defined elsewhere in this module: the DAG field constants
# (TOKENS, YEARS, SEGMENT, SPACE_BEFORE, START_OFFSET, END_OFFSET,
# START_POSITION, END_POSITION, INTERPRETATIONS, DISAMB, LEMMA, TAG),
# the helpers (read_dag, is_disamb, original_text, dag_offsets), the parsed
# CLI `args`, the `corpus` name, a `morfeusz` analyser instance, and the
# KText/KToken/KInterpretation classes.


def convert_to_ktagger(path):
    """Convert a DAG-format file into KText objects, one per paragraph."""
    file_name = os.path.basename(path)
    paragraphs = read_dag(path)
    for paragraph_index, paragraph in enumerate(paragraphs):
        if args.only_disamb:
            # Keep only tokens that carry a disambiguated interpretation.
            tokens = [token for token in paragraph[TOKENS] if is_disamb(token)]
            paragraph[TOKENS] = tokens

        paragraph_id = f"{corpus}▁{file_name}▁{paragraph_index}"
        ktext = KText(paragraph_id)
        years = paragraph[YEARS]
        year_feature = years[:2]  # coarse year feature: first two elements
        ktext.year = year_feature

        text = original_text(paragraph)
        ktext.text = text
        dag_offsets(paragraph)  # compute character offsets for the DAG tokens

        for token in paragraph[TOKENS]:
            ktoken = KToken(token[SEGMENT], token[SPACE_BEFORE],
                            token[START_OFFSET], token[END_OFFSET])
            ktext.add_token(ktoken)
            ktoken.start_position = token[START_POSITION]
            ktoken.end_position = token[END_POSITION]
            for interpretation in token[INTERPRETATIONS]:
                disamb = 'disamb' in interpretation[DISAMB]
                if args.only_disamb and not disamb:
                    continue
                manual = 'manual' in interpretation[DISAMB]
                kinterpretation = KInterpretation(interpretation[LEMMA],
                                                  interpretation[TAG],
                                                  disamb, manual)
                ktoken.add_interpretation(kinterpretation)

        # Sanity checks: the offsets must reconstruct the original text, and
        # serialization must round-trip losslessly.
        assert text == ktext.infer_original_text()
        ktext.check_offsets()
        payload = json.loads(ktext.save2())
        k = KText.load(payload)
        assert ktext.save2() == k.save2()
        assert payload == ktext.save()

        yield ktext
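# Usage sketch (hypothetical helper, not part of the original script):
# convert_to_ktagger() is a generator, so paragraphs can be streamed straight
# to a JSONL file; save2() returns the JSON string validated by the
# round-trip asserts above.
def write_ktagger_jsonl(dag_paths, out_path):
    with open(out_path, 'w', encoding='utf-8') as output:
        for path in dag_paths:
            for ktext in convert_to_ktagger(path):
                output.write(ktext.save2() + '\n')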
def morfeusz_tokenize(text: str, original_ktext: KText):
    """Analyse `text` with Morfeusz, copying id and year from
    `original_ktext`. Readings that share a span in the analysis DAG are
    merged into one token with multiple interpretations."""
    ktext = KText(original_ktext.id)
    ktext.text = text
    ktext.year = original_ktext.year
    output = morfeusz.analyse(text)
    for start_position, end_position, i in output:
        form, pseudo_lemma, combined_tags, _, _ = i
        # Strip the SGJP homonym qualifier from the lemma (e.g. ':s1', ':v2').
        lemma = regex.sub(r':[abcdijnopqsv][0-9]?$', '', pseudo_lemma)
        kinterpretation = KInterpretation(lemma, combined_tags,
                                          disamb=False, manual=False)
        last = ktext.tokens[-1] if ktext.tokens else None
        if last is not None and last.start_position == start_position \
                and last.end_position == end_position:
            # Same DAG span as the previous token: just another reading.
            last.add_interpretation(kinterpretation)
        else:
            ktoken = KToken(form, space_before=None,
                            start_offset=None, end_offset=None)
            ktoken.start_position = start_position
            ktoken.end_position = end_position
            ktoken.add_interpretation(kinterpretation)
            ktext.add_token(ktoken)
    return ktext
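# The module-level `morfeusz` used above is assumed to come from the
# morfeusz2 bindings, roughly:
#
#     import morfeusz2
#     morfeusz = morfeusz2.Morfeusz()
#
# analyse() returns (start_node, end_node, interpretation) triples over the
# analysis DAG, which is why readings sharing a span are merged above.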
def read_xces(file_path: str, corpus_name: str = '', only_disamb: bool = False):
    """Yield KText paragraphs from an XCES corpus file."""
    paragraphs_defined = True
    ns = False  # "no separator": <ns/> means no space before the next token
    first_chunk = True
    paragraph_index = 0
    for event, elem in ET.iterparse(file_path, events=("start", "end")):
        if first_chunk and event == "start" and elem.tag in ('chunk', 'sentence'):
            # If the first chunk is already a sentence, the corpus has no
            # explicit paragraph level.
            if elem.get('type') == 's' or elem.tag == 'sentence':
                paragraphs_defined = False
            first_chunk = False
        elif event == "end" and elem.tag in ('chunk', 'sentence'):
            if paragraphs_defined and elem.tag == 'chunk' and elem.get('type') != 's':
                xml_sentences = list(elem)
            elif (not paragraphs_defined) and (
                    (elem.tag == 'chunk' and elem.get('type') == 's')
                    or elem.tag == 'sentence'):
                xml_sentences = [elem]
            else:
                continue
            paragraph = KText(f"{corpus_name}▁{file_path}▁{paragraph_index}")
            paragraph_index += 1
            start_position = 0
            for sentence_index, xml_sentence in enumerate(xml_sentences):
                for token_index, xml_token in enumerate(list(xml_sentence)):
                    if xml_token.tag == 'ns':
                        # Omit the first <ns/> in a paragraph.
                        if token_index > 0 or sentence_index > 0:
                            ns = True
                    elif xml_token.tag == 'tok':
                        token = KToken(None, None, None, None,
                                       start_position=start_position,
                                       end_position=start_position + 1)
                        start_position += 1
                        token.space_before = not ns
                        for xml_node in xml_token:
                            if xml_node.tag == 'orth':
                                orth = xml_node.text
                                if orth is not None:
                                    # Assumption: normalize non-breaking
                                    # spaces (e.g. "a j e n t a").
                                    orth = orth.replace('\xa0', ' ')
                                    token.form = orth
                            elif xml_node.tag == 'lex':
                                disamb = xml_node.get('disamb') == '1'
                                base = xml_node.find('base').text
                                ctag = xml_node.find('ctag').text
                                form = KInterpretation(base, ctag,
                                                       disamb=disamb,
                                                       manual=disamb)  # TODO
                                # TODO: warn if a token has more than one
                                # disamb interpretation.
                                if disamb or not only_disamb:
                                    token.add_interpretation(form)
                            elif xml_node.tag == 'ann':
                                continue
                            else:
                                logging.error(f'Error 1 {xml_token}')
                        if token.form:
                            paragraph.add_token(token)
                            ns = False
                        else:
                            logging.error(f'Error 2 {xml_token}')
                if paragraph.tokens:
                    paragraph.tokens[-1].sentence_end = True
            paragraph.text = paragraph.infer_original_text()
            paragraph.fix_offsets(paragraph.text)
            yield paragraph
            elem.clear()
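# Usage sketch (hypothetical file path and corpus name): stream gold
# paragraphs from an XCES file the same way convert_to_ktagger() streams
# DAG files.
#
#     for ktext in read_xces('corpus.xml', corpus_name='nkjp',
#                            only_disamb=True):
#         print(ktext.id, len(ktext.tokens))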