#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# utilities for dealing with LRLPs: extract and print comparable corpus data
# (forked from panx27/elisatools)
import sys
import argparse
import os
import subprocess
from subprocess import check_call, CalledProcessError
import shlex
import xml.etree.ElementTree as ET
from itertools import compress
from collections import defaultdict as dd
from lputil import morph_tok, getgarbagemask

scriptdir = os.path.dirname(os.path.abspath(__file__))
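
# NOTE (assumption): the cluster files are plain XML whose shape is inferred
# from the parsing code in getclusters below; the root element name and the
# docid values here are illustrative only:
#
#   <clusters>
#     <cluster id="CL001">
#       <doc language="uzb" docid="UZB_DF_000123"/>
#       <doc language="eng" docid="ENG_DF_000456"/>
#     </cluster>
#   </clusters>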
def getclusters(indir):
  ''' get cluster mappings from xml files '''
  data = dd(lambda: dd(set))
  for filename in os.listdir(indir):
    # only consider xml files
    if not filename.endswith(".xml"):
      continue
    # avoid mac meta stuff
    if filename.startswith("."):
      continue
    with open(os.path.join(indir, filename), 'r') as ifh:
      try:
        xobj = ET.parse(ifh)
        for cluster in xobj.findall(".//cluster"):
          clid = cluster.get('id')
          for doc in cluster.findall(".//doc"):
            # map language -> docid -> set of cluster ids
            data[doc.get('language')][doc.get('docid')].add(clid)
      except ET.ParseError:
        sys.stderr.write("Parse error on "+filename+"\n")
        continue
  return data
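
# Output layout under --outdir (default subdirectory names shown):
#   <lang>.manifest                    one tab-separated line per kept segment
#   raw.original/<lang>.flat           untokenized text (rejects in garbage/)
#   raw.tokenized/<lang>.flat          LDC tokenization from TOKEN elements
#   morph-tokenized/, morph/, pos/     morphological segmentation and POS tags
#   original/, tokenized/              cleaned copies produced by clean.sh
#   cdec-tokenized/, agile-tokenized/  external tokenizer output (.flat, .flat.lc)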
def main():
  parser = argparse.ArgumentParser(description="Extract and print comparable corpus "
                                               "data, tokenized, morph, pos tag and "
                                               "original, with manifests")
  parser.add_argument("--rootdir", "-r", default=".",
                      help="root lrlp dir")
  parser.add_argument("--outdir", "-o",
                      help="where to write extracted files")
  parser.add_argument("--src", "-s", default='uzb',
                      help="source language 3 letter code")
  parser.add_argument("--trg", "-t", default='eng',
                      help="target language 3 letter code")
  parser.add_argument("--nogarbage", action='store_true', default=False,
                      help="turn off garbage filtering")
  parser.add_argument("--toksubdir", default="raw.tokenized",
                      help="subdirectory for tokenized files")
  parser.add_argument("--cleantoksubdir", default="tokenized",
                      help="subdirectory for cleaned ldc-tokenized files")
  parser.add_argument("--cdectoksubdir", default="cdec-tokenized",
                      help="subdirectory for cdec-tokenized files")
  parser.add_argument("--agiletoksubdir", default="agile-tokenized",
                      help="subdirectory for agile-tokenized files")
  parser.add_argument("--morphtoksubdir", default="morph-tokenized",
                      help="subdirectory for tokenized files based on "
                           "morphological segmentation")
  parser.add_argument("--cleanorigsubdir", default="original",
                      help="subdirectory for cleaned raw original")
  parser.add_argument("--morphsubdir", default="morph",
                      help="subdirectory for morphological information")
  parser.add_argument("--origsubdir", default="raw.original",
                      help="subdirectory for untokenized files")
  parser.add_argument("--garbagesubdir", default="garbage",
                      help="subdirectory for garbage files (under orig)")
  parser.add_argument("--possubdir", default="pos",
                      help="subdirectory for pos tag files")
  parser.add_argument("--cleanpath", default=os.path.join(scriptdir, 'clean.sh'),
                      help="path to cleaning script")
  parser.add_argument("--agiletokenizer", default=os.path.join(scriptdir, 'agiletok.sh'),
                      help="path to agile tokenizer binary")
  parser.add_argument("--cdectokenizer", default=os.path.join(scriptdir, 'cdectok.sh'),
                      help="cdec tokenizer program wrapper")

  try:
    args = parser.parse_args()
  except IOError as msg:
    parser.error(str(msg))
  tokoutdir = os.path.join(args.outdir, args.toksubdir)
  origoutdir = os.path.join(args.outdir, args.origsubdir)
  cleantokoutdir = os.path.join(args.outdir, args.cleantoksubdir)
  cleanorigoutdir = os.path.join(args.outdir, args.cleanorigsubdir)
  cdectokoutdir = os.path.join(args.outdir, args.cdectoksubdir)
  agiletokoutdir = os.path.join(args.outdir, args.agiletoksubdir)
  morphtokoutdir = os.path.join(args.outdir, args.morphtoksubdir)
  morphoutdir = os.path.join(args.outdir, args.morphsubdir)
  posoutdir = os.path.join(args.outdir, args.possubdir)
  cleanpath = args.cleanpath
  dirs = [args.outdir,
          tokoutdir,
          cleantokoutdir,
          cleanorigoutdir,
          cdectokoutdir,
          agiletokoutdir,
          origoutdir,
          morphtokoutdir,
          morphoutdir,
          posoutdir]
  if args.nogarbage:
    garbageoutdir = None
  else:
    garbageoutdir = os.path.join(origoutdir, args.garbagesubdir)
    dirs.append(garbageoutdir)
  for d in dirs:
    if not os.path.exists(d):
      os.makedirs(d)
  rootdir = os.path.join(args.rootdir, 'data', 'translation', 'comparable')
  clusters = getclusters(os.path.join(rootdir, 'clusters'))
  srcindir = os.path.join(rootdir, args.src, 'ltf')
  trgindir = os.path.join(rootdir, args.trg, 'ltf')
  datasets = [(args.src, srcindir, args.cdectokenizer, cdectokoutdir),
              (args.trg, trgindir, args.agiletokenizer, agiletokoutdir)]
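  # each side is paired with its external tokenizer and output directory:
  # the cdec tokenizer for the source language, the agile tokenizer for the
  # (by default English) target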
  for lang, indir, exttokenizer, exttokoutdir in datasets:
    inbase = lang
    man_fh = open(os.path.join(args.outdir, "%s.manifest" % inbase), 'w')
    orig_fh = open(os.path.join(origoutdir, "%s.flat" % inbase), 'w')
    if args.nogarbage:
      garbage_fh = None
      garbage_man_fh = None
    else:
      garbage_fh = open(os.path.join(garbageoutdir, "%s.flat" % inbase), 'w')
      garbage_man_fh = open(os.path.join(garbageoutdir, "%s.manifest" % inbase), 'w')
    tok_fh = open(os.path.join(tokoutdir, "%s.flat" % inbase), 'w')
    morphtok_fh = open(os.path.join(morphtokoutdir, "%s.flat" % inbase), 'w')
    morph_fh = open(os.path.join(morphoutdir, "%s.flat" % inbase), 'w')
    pos_fh = open(os.path.join(posoutdir, "%s.flat" % inbase), 'w')
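    # manifest line format, tab-separated: filename, docid, segment id,
    # start_char, end_char, cluster id(s) ('_'-joined, or NONE)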
    for filename in os.listdir(indir):
      # assume ltf filename
      if not filename.endswith("ltf.xml"):
        continue
      # avoid mac meta stuff
      if filename.startswith("."):
        continue
      with open(os.path.join(indir, filename), 'r') as ifh:
        try:
          xobj = ET.parse(ifh)
          docid = xobj.findall(".//DOC")[0].get('id')
          if len(clusters[lang][docid]) < 1:
            sys.stderr.write("Warning: no clusters for %s\n" % docid)
            clusid = "NONE"
          else:
            clset = clusters[lang][docid]
            if len(clset) > 1:
              sys.stderr.write("Warning: multiple clusters for %s\n" % docid)
            clusid = '_'.join(clset)
          origlines = [x.text+"\n" for x in xobj.findall(".//ORIGINAL_TEXT")]
          # garbagemask is True for the lines to keep; goodmask, despite the
          # name, is its complement and marks the rejected (garbage) lines
          garbagemask = getgarbagemask(origlines, disabled=args.nogarbage)
          goodmask = [not x for x in garbagemask]
          seginfo = [[x.get(y) for y in ('id', 'start_char', 'end_char')]
                     for x in xobj.findall(".//SEG")]
          for line in compress(origlines, garbagemask):
            orig_fh.write(line)
          for tup in compress(seginfo, garbagemask):
            man_fh.write("\t".join(map(str, [filename, docid]+tup+[clusid]))+"\n")
          if not args.nogarbage:
            for line in compress(origlines, goodmask):
              garbage_fh.write(line)
            for tup in compress(seginfo, goodmask):
              garbage_man_fh.write("\t".join(map(str, [filename, docid]+tup+[clusid]))+"\n")
          for x in compress(xobj.findall(".//SEG"), garbagemask):
            tokens = x.findall(".//TOKEN")
            toktext = []
            morphtoktext = []
            morphtext = []
            postext = []
            for y in tokens:
              if y.text is None:
                continue
              toktext.append(y.text)
              postext.append(y.get("pos") or "none")
              for mt, mtt in morph_tok(y):
                morphtext.append(mt)
                morphtoktext.append(mtt)
            tok_fh.write(' '.join(toktext)+"\n")
            morphtok_fh.write(' '.join(morphtoktext)+"\n")
            morph_fh.write(' '.join(morphtext)+"\n")
            pos_fh.write(' '.join(postext)+"\n")
        except ET.ParseError:
          sys.stderr.write("Parse error on "+ifh.name+"\n")
          continue
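    # close the flat files before handing them to the external cleaning script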
    orig_fh.close()
    tok_fh.close()
    clean_orig = os.path.join(cleanorigoutdir, "%s.flat" % inbase)
    clean_tok = os.path.join(cleantokoutdir, "%s.flat" % inbase)
    for inclean, outclean in zip((orig_fh.name, tok_fh.name), (clean_orig, clean_tok)):
      cleancmd = "{cmd} {inclean} {outclean}".format(cmd=cleanpath,
                                                     inclean=inclean,
                                                     outclean=outclean)
      sys.stderr.write(cleancmd+"\n")
      try:
        check_call(shlex.split(cleancmd))
      except CalledProcessError as e:
        sys.stderr.write("Error code %d running %s\n" % (e.returncode, e.cmd))
        sys.exit(1)
    # run the external tokenizer on the original text; judging by the file
    # names, -o receives the lowercased output and -t the tokenized output
    ext_cmd = "%s -i %s -o %s -t %s" % (exttokenizer,
                                        orig_fh.name,
                                        os.path.join(exttokoutdir,
                                                     "%s.flat.lc" % inbase),
                                        os.path.join(exttokoutdir,
                                                     "%s.flat" % inbase))
    p = subprocess.Popen(shlex.split(ext_cmd))
    p.wait()
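
# Example invocation (paths illustrative; note --outdir has no default and
# must be supplied):
#   python3 extract_comparable.py -r /path/to/lrlp -o /path/to/out -s uzb -t eng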
if __name__ == '__main__':
  main()