def test_read_proj_ds_tree(self):
    """
    Project the dependency structure of the translation line onto the
    language line of ``self.inst2`` and verify the result is structurally
    equal to a hand-written gold dependency tree.
    """
    # Source DS: the tree over the translation-line words.
    src_t = get_ds(self.inst2, trans(self.inst2))
    # Projection target: the language-line words.
    tgt_w = lang(self.inst2)
    # Translation<->gloss alignment used to carry edges across.
    aln = get_trans_gloss_alignment(self.inst2)

    # Gold tree, in PTB-style dependency notation (token[index]).
    tgt_t = DepTree.fromstring(""" (ROOT[0] (glaubst[2] (Was[1]) (Du[3]) (wer[4]) (angerufen[5] (hat[6])) )) """, stype=DEPSTR_PTB)

    proj_t = project_ds(src_t, tgt_w, aln)

    # structurally_eq compares tree shape/labels, not object identity.
    self.assertTrue(proj_t.structurally_eq(tgt_t))
def split_instances(instances, train=0, dev=0, test=0):
    """
    Partition *instances* into (train, dev, test) slices.

    The split points are chosen so that each portion receives roughly the
    requested fraction of the total *word* count (not sentence count),
    measured on the language line of each instance.

    :type instances: list[RGIgt]
    """
    inst_list = list(instances)

    # Tally the words contributed by each sentence so that a running
    # word offset can be mapped back to a sentence index.
    counter = WordCount()
    for sent_idx, igt in enumerate(inst_list):
        word_total = len(lang(igt))
        SPLIT_LOG.debug('{} words in sentence {} (id {})'.format(word_total, sent_idx, igt.id))
        counter.add(sent_idx, word_total)

    total_words = counter.num_words

    # Cumulative word offsets at which each portion ends.
    train_cutoff = round(train * total_words)
    dev_cutoff = train_cutoff + round(dev * total_words)
    test_cutoff = dev_cutoff + round(test * total_words)

    # Translate the word offsets into sentence boundaries.
    train_boundary = counter.get_snt_from_wordnum(train_cutoff)
    dev_boundary = counter.get_snt_from_wordnum(dev_cutoff)
    test_boundary = counter.get_snt_from_wordnum(test_cutoff)

    # Slice the sentence list at those boundaries and return the pieces.
    return (inst_list[:train_boundary],
            inst_list[train_boundary:dev_boundary],
            inst_list[dev_boundary:test_boundary + 1])
def naacl_to_xigt(naacl_path):
    """
    Convert the NAACL format to XIGT.

    :param naacl_path: path to a NAACL-formatted annotation file.
    :return: a ``XigtCorpus`` with one ``Igt`` per NAACL record, carrying
             raw/normalized tiers, manual dependency tiers (when parseable),
             and a manual bilingual (trans<->gloss) alignment.
    """
    # Context manager so the file handle is always closed (was a bare
    # open(...).read() that leaked the handle).
    with open(naacl_path, 'r') as f:
        content = f.read()

    # First, collect all the instances. Raw strings so \s/\S are not
    # treated as (invalid) string escapes.
    instances = re.findall(r'Igt_id[\s\S]+?Q6.*Answer', content)

    xc = XigtCorpus()

    for instance_txt in instances:
        inst = Igt(id='i{}'.format(len(xc)))
        # Lines 1-3 of the record are the lang / gloss / trans raw lines.
        lang_raw, gloss_raw, trans_raw = instance_txt.split('\n')[1:4]

        # Now, create the raw tier...
        raw_tier = Tier(id=gen_tier_id(inst, 'r'), type='odin',
                        attributes={STATE_ATTRIBUTE: RAW_STATE})
        raw_tier.append(Item(id=ask_item_id(raw_tier), text=lang_raw,
                             attributes={ODIN_TAG_ATTRIBUTE: ODIN_LANG_TAG}))
        raw_tier.append(Item(id=ask_item_id(raw_tier), text=gloss_raw,
                             attributes={ODIN_TAG_ATTRIBUTE: ODIN_GLOSS_TAG}))
        raw_tier.append(Item(id=ask_item_id(raw_tier), text=trans_raw,
                             attributes={ODIN_TAG_ATTRIBUTE: ODIN_TRANS_TAG}))
        inst.append(raw_tier)
        xc.append(inst)

        # Generate the clean/normal tiers, but without any cleaning.
        generate_normal_tier(inst, clean=False)

        # Lang dependency representation handling...
        lang_ds_str = re.search(r'Q6:([\s\S]+?)Q6:', instance_txt).group(1)
        lang_ds_lines = lang_ds_str.split('\n')[5:-3]

        try:
            lang_dt = parse_naacl_dep(lang(inst), lang_ds_lines)
            create_dt_tier(inst, lang_dt, lang(inst), parse_method=INTENT_POS_MANUAL)
        except (TreeError, IndexError):
            # Best effort: a malformed tree just means no DS tier for this inst.
            pass

        # Eng DS handling...
        eng_ds_str = re.search(r'Q3:([\s\S]+?)Q3:', instance_txt).group(1)
        eng_ds_lines = eng_ds_str.split('\n')[2:-3]

        try:
            eng_dt = parse_naacl_dep(trans(inst), eng_ds_lines)
            create_dt_tier(inst, eng_dt, trans(inst), parse_method=INTENT_POS_MANUAL)
        except (TreeError, IndexError, ValueError):
            pass

        # Add Alignment...
        biling_aln_str = re.search(r'Q5:([\s\S]+?)Q5:', instance_txt).group(1)
        biling_aln_lines = biling_aln_str.split('\n')[4:-3]

        # NOTE(review): a leading space on the raw line appears to shift the
        # token numbering by one — confirm against the NAACL spec.
        trans_offset = trans_raw.startswith(' ')
        gloss_offset = gloss_raw.startswith(' ')

        # Created outside the try so the alignment object always exists
        # (possibly partially filled) when it is attached below; previously
        # it was created inside the try, risking a NameError on failure.
        a = Alignment()
        try:
            for line in biling_aln_lines:
                gloss_s, trans_s = line.split()[0:2]
                if '.' in gloss_s:
                    continue
                gloss_i = int(gloss_s)
                for trans_token in trans_s.split(','):
                    trans_i = int(trans_token)
                    if trans_i == 0:
                        # 0 marks "unaligned" in the NAACL alignment column.
                        continue
                    else:
                        if trans_offset:
                            trans_i -= 1
                        if gloss_offset:
                            gloss_i -= 1
                        a.add((trans_i, gloss_i))
        except Exception:
            # Best effort: keep whatever pairs were parsed before the error.
            # (Was a bare "except:", which also swallowed KeyboardInterrupt.)
            pass

        set_bilingual_alignment(inst, trans(inst), gloss(inst), a,
                                aln_method=INTENT_ALN_MANUAL)

    return xc
def test_gloss_projection_unaligned(self):
    """
    A gloss token with no alignment, projected with unk_handling='keep',
    should surface as an 'UNK' POS tag on the language line.
    """
    corpus = xc_load(os.path.join(testfile_dir, "xigt/project_gloss_lang_tests.xml"))
    instance = corpus[0]

    project_gloss_pos_to_lang(instance, tag_method=INTENT_POS_PROJ, unk_handling='keep')

    tag_tier = pos_tag_tier(instance, lang(instance).id, INTENT_POS_PROJ)
    last_tag = tag_tier[-1].value()
    self.assertEqual('UNK', last_tag)
def convert_pml(aln_path, out_path, hindi=True):
    """
    Convert a PML alignment file (plus its two referenced documents) into a
    XIGT corpus and dump it as XigtXML to *out_path*.

    :param aln_path: path to the PML alignment file; the two aligned
                     documents are resolved relative to its directory.
    :param out_path: path the resulting XigtXML corpus is written to.
    :param hindi: if True, pull instance text from the Hindi data
                  (retrieve_hindi) and take tokens/POS directly from the
                  PML sentences; otherwise use the NAACL data.
    """
    if hindi:
        igt_data = retrieve_hindi()
    else:
        igt_data = retrieve_naacl()

    a_root = load_xml(aln_path)
    doc_a = a_root.find(".//reffile[@name='document_a']").get('href')
    doc_b = a_root.find(".//reffile[@name='document_b']").get('href')

    # NOTE(review): the nested os.path.join is redundant — the inner call
    # already produces the full path; the outer one is a single-arg no-op.
    doc_a = os.path.join(os.path.join(os.path.dirname(aln_path), doc_a))
    doc_b = os.path.join(os.path.join(os.path.dirname(aln_path), doc_b))

    # Load the sentences for each document.
    a_sents, a_glossed = load_sents(doc_a)
    b_sents, b_glossed = load_sents(doc_b)

    sent_alignments = a_root.findall(".//body/LM")

    assert (a_glossed and not b_glossed) or (b_glossed and not a_glossed), "Only one file should have glosses"

    xc = XigtCorpus()

    for sent_alignment in sent_alignments:
        # Get the sentence id... (the part after the first '-').
        aln_id = sent_alignment.attrib.get('id')
        a_snt_id = re.search('^.+?-(.*)$', aln_id).group(1)
        if a_snt_id not in igt_data:
            continue

        # Get the text and tokens from the naacl data.
        pre_txt, lang_txt, gloss_txt, trans_txt = igt_data[a_snt_id]
        lang_tokens = lang_txt.split()
        gloss_tokens = gloss_txt.split()
        trans_tokens = trans_txt.split()

        # Sentence refs are URI fragments of the form "path#id".
        a_snt_ref = sent_alignment.find('./tree_a.rf').text.split('#')[1]
        b_snt_ref = sent_alignment.find('./tree_b.rf').text.split('#')[1]

        word_alignments = sent_alignment.findall('./node_alignments/LM')

        a_snt, a_edges = a_sents[a_snt_ref]
        b_snt, b_edges = b_sents[b_snt_ref]
        assert isinstance(a_snt, Sentence)
        assert isinstance(b_snt, Sentence)

        # -------------------------------------------
        # Skip sentences if they are not found for whatever reason
        # -------------------------------------------
        if not a_snt or not b_snt:
            continue

        # -------------------------------------------
        # Start constructing the IGT Instance.
        # -------------------------------------------
        # By default doc A is the translation side and doc B the glossed
        # side; swap if the gloss annotations were found in doc A instead.
        trans_snt, trans_indices = a_snt, a_edges
        gloss_snt, gloss_indices = b_snt, b_edges
        if a_glossed:
            trans_snt, trans_indices = b_snt, b_edges
            gloss_snt, gloss_indices = a_snt, a_edges

        # Hindi stuff: take tokens, POS tags, and gloss text straight from
        # the PML sentence objects rather than the retrieved text lines.
        if hindi:
            lang_tokens = [w.text for w in gloss_snt]
            lang_postags = [w.pos for w in gloss_snt]
            lang_txt = ' '.join(lang_tokens)

            trans_tokens = [w.text for w in trans_snt]
            trans_postags = [w.pos for w in trans_snt]
            trans_txt = ' '.join(trans_tokens)

            gloss_tokens = [w.gloss if w.gloss else 'NULL' for w in gloss_snt]
            gloss_postags = lang_postags
            gloss_txt = ' '.join(gloss_tokens)

        inst = Igt(id=re.sub('s-', 'igt', a_snt_ref))
        nt = Tier(type=ODIN_TIER_TYPE, id=NORM_ID, attributes={STATE_ATTRIBUTE:NORM_STATE})
        ll = Item(id='n1', attributes={ODIN_TAG_ATTRIBUTE:ODIN_LANG_TAG}, text=lang_txt)
        gl = Item(id='n2', attributes={ODIN_TAG_ATTRIBUTE:ODIN_GLOSS_TAG}, text=gloss_txt)
        tl = Item(id='n3', attributes={ODIN_TAG_ATTRIBUTE:ODIN_TRANS_TAG}, text=trans_txt)
        nt.extend([ll,gl,tl])
        inst.append(nt)

        # -------------------------------------------
        # Handle the phrase tiers
        # -------------------------------------------
        generate_lang_phrase_tier(inst)
        generate_trans_phrase_tier(inst)

        def process_postags(sent, tokens):
            # Look up each token's POS by its 1-based order in the
            # sentence; None for tokens with no matching word.
            postags = []
            for i, token in enumerate(tokens):
                word = sent.getorder(i+1)
                if word is None:
                    postags.append(None)
                else:
                    postags.append(word.pos)
            return postags

        # -------------------------------------------
        # Now, handle the translation words.
        # -------------------------------------------
        tt = create_word_tier(ODIN_TRANS_TAG, trans_tokens, trans_phrase(inst)[0])
        inst.append(tt)
        # In the non-hindi case the POS tags were not set above, so derive
        # them from the PML sentence here.
        if not hindi:
            trans_postags = process_postags(trans_snt, trans_tokens)

        add_pos_tags(inst, tt.id, trans_postags, tag_method=INTENT_POS_MANUAL)

        # -------------------------------------------
        # Handle the words tiers...
        # -------------------------------------------
        wt = create_word_tier(ODIN_LANG_TAG, lang_tokens, lang_phrase(inst)[0])
        gwt= create_word_tier(ODIN_GLOSS_TAG, gloss_tokens, gl)
        inst.extend([wt, gwt])

        # Quickly set the alignment for the gloss words.
        for w, gw in zip(wt, gwt):
            gw.alignment = w.id

        if not hindi:
            lang_postags = process_postags(gloss_snt, gloss_tokens)
            gloss_postags = lang_postags

        add_pos_tags(inst, wt.id, lang_postags, tag_method=INTENT_POS_MANUAL)
        add_pos_tags(inst, gwt.id, gloss_postags, tag_method=INTENT_POS_MANUAL)

        create_dt_tier(inst, assemble_ds(gloss_snt, gloss_indices), wt, INTENT_DS_MANUAL)
        create_dt_tier(inst, assemble_ds(trans_snt, trans_indices), tt, INTENT_DS_MANUAL)

        # -------------------------------------------
        # Now, the word alignments.
        # -------------------------------------------
        a = Alignment()
        for word_alignment in word_alignments:
            a_ref = word_alignment.find('./a.rf').text.split('#')[1]
            b_ref = word_alignment.find('./b.rf').text.split('#')[1]

            a_word = a_snt.getid(a_ref)
            b_word = b_snt.getid(b_ref)

            if a_word is None or b_word is None:
                continue

            # Non-hindi words carry an explicit order attribute; hindi
            # words are numbered by their position in the sentence.
            if not hindi:
                a_idx = a_word.order
                b_idx = b_word.order
            else:
                a_idx = a_snt.index(a_word)+1
                b_idx = b_snt.index(b_word)+1

            # Make sure the glossed side supplies the lang index and the
            # other side the trans index, whichever document is which.
            if a_glossed:
                trans_idx = b_idx
                lang_idx = a_idx
            else:
                trans_idx = a_idx
                lang_idx = b_idx

            a.add((trans_idx, lang_idx))

        set_bilingual_alignment(inst, trans(inst), lang(inst), a, INTENT_ALN_MANUAL)
        set_bilingual_alignment(inst, trans(inst), gloss(inst), a, INTENT_ALN_MANUAL)

        xc.append(inst)

    with open(out_path, 'w', encoding='utf-8') as f:
        xigtxml.dump(f, xc)
def test_line_lengths(self):
    """The second corpus instance has 5 gloss tokens and 6 language tokens."""
    instance = self.xc[1]

    gloss_len = len(gloss(instance))
    lang_len = len(lang(instance))

    self.assertEqual(5, gloss_len)
    self.assertEqual(6, lang_len)