#!/usr/bin/env python3
"""Tokenize, tag and parse Swedish plain text data.
This was originally the pipeline by Filip Salomonsson for the Swedish
Treebank (using hunpos for tagging), later modified by Robert Östling to use
efselab and Python 3.
"""

import os
import shutil
import sys
import tempfile
import re
from subprocess import Popen
import gzip

from commandline import create_parser, validate_options
from conll import tagged_to_tagged_conll
from lemmatize import SUCLemmatizer
from tagger import SucTagger, SucNETagger, UDTagger
from tokenizer import build_sentences

__authors__ = """
Filip Salomonsson <filip.salomonsson@gmail.com>
Robert Östling <robert.ostling@helsinki.fi>
Aaron Smith <aaron.smith@lingfil.uu.se>
"""

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
MAX_TOKEN = 256


def main():
    parser = create_parser()
    options, args = parser.parse_args()
    validate_options(options, args)
    run_pipeline(options, args)
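

# run_pipeline() loads only the models required by the requested annotation
# layers, creates a shared temporary working directory, and then runs
# process_file() on every input file before cleaning up.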
def run_pipeline(options, args):
    models = {
        "suc_ne_tagger": None,
        "suc_tagger": None,
        "ud_tagger": None,
        "lemmatizer": None,
    }

    if options.tagged or options.ner or options.parsed:
        models["suc_tagger"] = SucTagger(options.tagging_model)

        if options.lemmatized:
            models["ud_tagger"] = UDTagger(options.ud_tagging_model)

    if options.ner:
        models["suc_ne_tagger"] = SucNETagger(options.ner_model)

    # Set up the working directory
    tmp_dir = tempfile.mkdtemp("-stb-pipeline")

    if options.parsed:
        shutil.copy(
            os.path.join(SCRIPT_DIR, options.parsing_model + ".mco"),
            tmp_dir
        )

    if options.lemmatized:
        models["lemmatizer"] = SUCLemmatizer()
        models["lemmatizer"].load(options.lemmatization_model)

    # Process each input file
    for filename in args:
        process_file(
            options,
            filename,
            tmp_dir,
            models,
            (True if options.non_capitalized else None)
        )

    cleanup(options, tmp_dir)
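

# process_file() tokenizes a single input file and, depending on the options,
# tags, lemmatizes, NE-tags and parses it. Intermediate results are written
# to the temporary directory and copied to the output directory at the end.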
def process_file(options, filename, tmp_dir, models, non_capitalized=None):
    print("Processing %s..." % filename, file=sys.stderr)

    tokenized_filename = output_filename(tmp_dir, filename, "tok")
    tagged_filename = output_filename(tmp_dir, filename, "tag")
    ner_filename = output_filename(tmp_dir, filename, "ne")

    sentences = run_tokenization(options, filename,
                                 non_capitalized=non_capitalized)

    annotated_sentences = []
    with open(tokenized_filename, "w", encoding="utf-8") as tokenized, \
         open(tagged_filename, "w", encoding="utf-8") as tagged, \
         open(ner_filename, "w", encoding="utf-8") as ner:
        # Run only one pass over the sentences, writing to all output files
        for sentence in sentences:
            write_to_file(tokenized, sentence)

            if options.tagged or options.parsed or options.ner:
                lemmas, ud_tags_list, suc_tags_list, suc_ne_list = \
                    run_tagging_and_lemmatization(options, sentence, models)

                annotated_sentences.append(
                    zip(sentence, lemmas, ud_tags_list, suc_tags_list)
                )

                # Keep only the part of each UD tag before the first "|"
                ud_tag_list = [
                    ud_tags[:ud_tags.find("|")]
                    for ud_tags in ud_tags_list
                ]

                if lemmas and ud_tags_list:
                    line_tokens = sentence, suc_tags_list, ud_tag_list, lemmas
                else:
                    line_tokens = sentence, suc_tags_list

                lines = ["\t".join(line) for line in zip(*line_tokens)]
                write_to_file(tagged, lines)

                if options.ner:
                    ner_lines = [
                        "\t".join(line)
                        for line in zip(sentence, suc_ne_list)
                    ]
                    write_to_file(ner, ner_lines)

    parsed_filename = ""
    if options.parsed:
        parsed_filename = parse(
            options, filename, annotated_sentences, tmp_dir
        )

    write_to_output([
        (options.tokenized, tokenized_filename, options.output_dir),
        (options.tagged, tagged_filename, options.output_dir),
        (options.parsed, parsed_filename, options.output_dir),
        (options.ner, ner_filename, options.output_dir),
    ])

    print("done.", file=sys.stderr)
def run_tokenization(options, filename, non_capitalized=None):
    with (gzip.open(filename, "rt", encoding="utf-8")
          if filename.endswith(".gz")
          else open(filename, "r", encoding="utf-8")) as input_file:
        data = input_file.read()

    if options.skip_tokenization:
        sentences = [
            sentence.split('\n')
            for sentence in data.split('\n\n')
            if sentence.strip()
        ]
    elif options.skip_segmentation:
        sentences = [
            build_sentences(line, segment=False)
            for line in data.split('\n')
            if line.strip()
        ]
    else:
        if non_capitalized is None:
            n_capitalized = len(re.findall(r'[\.!?] +[A-ZÅÄÖ]', data))
            n_non_capitalized = len(re.findall(r'[\.!?] +[a-zåäö]', data))
            non_capitalized = n_non_capitalized > 5*n_capitalized
        sentences = build_sentences(data, non_capitalized=non_capitalized)

    sentences = list(filter(bool,
        [[token for token in sentence if len(token) <= MAX_TOKEN]
         for sentence in sentences]))

    return sentences
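

# run_tagging_and_lemmatization() always produces SUC tags for the sentence;
# lemmas, UD tags and SUC named entities are added only when the relevant
# options are enabled (NE tagging presupposes lemmatization, since the NE
# tagger consumes token/lemma/tag triples).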
def run_tagging_and_lemmatization(options, sentence, models):
    lemmas = []
    ud_tags_list = []
    suc_tags_list = models["suc_tagger"].tag(sentence)
    suc_ne_list = []

    if options.lemmatized:
        lemmas = [
            models["lemmatizer"].predict(token, tag)
            for token, tag in zip(sentence, suc_tags_list)
        ]
        ud_tags_list = models["ud_tagger"].tag(sentence, lemmas, suc_tags_list)
        if options.ner:
            suc_ne_list = models["suc_ne_tagger"].tag(
                list(zip(sentence, lemmas, suc_tags_list))
            )

    return lemmas, ud_tags_list, suc_tags_list, suc_ne_list
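

# parse() converts the tagged sentences to CoNLL format and runs MaltParser
# (the Java jar given by options.malt) on them, writing the parsed output
# and a log file to the temporary working directory.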
def parse(options, filename, annotated_sentences, tmp_dir):
    tagged_conll_filename = output_filename(tmp_dir, filename, "tag.conll")
    parsed_filename = output_filename(tmp_dir, filename, "conll")
    log_filename = output_filename(tmp_dir, filename, "log")

    # The parser command line depends on the input and output files,
    # so we build it for each data file
    parser_cmdline = [
        "java",
        "-Xmx2000m",
        "-jar", os.path.expanduser(options.malt),
        "-m", "parse",
        "-i", tagged_conll_filename,
        "-o", parsed_filename,
        "-w", tmp_dir,
        "-c", os.path.basename(options.parsing_model)
    ]

    # Convert the .tag file to tagged CoNLL (the input format for the parser)
    with open(tagged_conll_filename, "w", encoding="utf-8") as tagged_conll_file:
        tagged_to_tagged_conll(annotated_sentences, tagged_conll_file)

    # Run the parser
    with open(log_filename, "w", encoding="utf-8") as log_file:
        returncode = Popen(
            parser_cmdline, stdout=log_file, stderr=log_file
        ).wait()

    if returncode:
        sys.exit("Parsing failed! See log file: %s" % log_filename)

    return parsed_filename


def write_to_file(file, lines):
    for line in lines:
        print(line, file=file)
    print(file=file)


def write_to_output(filename_mapping):
    for should_copy, filename, output_dir in filename_mapping:
        if should_copy:
            shutil.copy(filename, output_dir)


def cleanup(options, tmp_dir):
    if not options.no_delete:
        shutil.rmtree(tmp_dir)
    else:
        print("Leaving working directory as is: %s" % tmp_dir, file=sys.stderr)


def output_filename(tmp_dir, filename, suffix):
    # Use the input file's basename (without extension) plus the given suffix
    stem, _ = os.path.splitext(filename)
    basename = os.path.basename(stem)
    return os.path.join(tmp_dir, "%s.%s" % (basename, suffix))


if __name__ == '__main__':
    main()