#!/usr/bin/python2
# -*- coding: utf-8 -*-
'''
TODO:
- Make system more flexible / variable (to use with other languages).
- Allow proper use of single language (-s option) data creation.
- Transfer code to helpers.py or parameters.py.
'''
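# Example invocation (hypothetical paths; see handle_arguments() for
# all options):
#   python2 create_input_data.py -t -tp /opt/treetagger -ls 100000 de en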
from argparse import ArgumentParser
from collections import defaultdict
from itertools import combinations
from os import sep, makedirs
from os.path import exists
from sys import exit, stderr
from nltk.corpus import stopwords
from nltk import word_tokenize
from composes.semantic_space.space import Space
from composes.utils import io_utils
from lib.ttpw.treetaggerwrapper import TreeTagger
from helpers import LONG_LANGTAG, getTag, Suffixes, InputFilenames, \
OutputFilenames
from parameters import LANG_1, LANG_2, SENTENCES_LIMIT, \
MAX_SENTENCE_LEN, MIN_PAIR_OCC, \
TREETAGGER_BASE_PATH, MAX_WORD_LEN, \
DATA_DIR, DATA_DIR_IN, DATA_DIR_OUT, ENC, \
NO_POS_SYM
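# External dependencies used below: NLTK (word_tokenize, stopwords),
# the DISSECT toolkit ('composes') for building semantic spaces, and
# TreeTagger via the bundled treetaggerwrapper.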
# Input files (col file, row files and sparse matrix files) for DISSECT
OUTPUT_FILE_DE_DE_EN_SM = ''.join([DATA_DIR_OUT, 'de_de-en.sm'])
OUTPUT_FILE_EN_EN_DE_SM = ''.join([DATA_DIR_OUT, 'en_en-de.sm'])
OUTPUT_FILE_DE_EN_WORDS_COL = ''.join([DATA_DIR_OUT, 'de_en-words.col'])
OUTPUT_FILE_DE_WORDS_ROW = ''.join([DATA_DIR_OUT, 'de-words.row'])
OUTPUT_FILE_EN_WORDS_ROW = ''.join([DATA_DIR_OUT, 'en-words.row'])
OUTPUT_FILE_DE_DE_EN_PKL = ''.join([DATA_DIR_OUT, 'de_de-en.pkl'])
OUTPUT_FILE_EN_EN_DE_PKL = ''.join([DATA_DIR_OUT, 'en_en-de.pkl'])
'''
DEPRECATED: output_file_1_sm, output_file_1_col,
output_file_1_row (same as col) and output_file_1_pkl
will be implemented instead.
# Single language output files (except the *_ROW files)
# (That said: a *_ROW file *is* a *_COL file)
OUTPUT_FILE_DE_SM = ''.join([DATA_DIR_OUT, 'de.sm'])
OUTPUT_FILE_EN_SM = ''.join([DATA_DIR_OUT, 'en.sm'])
OUTPUT_FILE_DE_PKL = ''.join([DATA_DIR_OUT, 'de.pkl'])
OUTPUT_FILE_EN_PKL = ''.join([DATA_DIR_OUT, 'en.pkl'])
OUTPUT_FILE_DE_WORDS_COL = ''.join([DATA_DIR_OUT, 'de-words.col'])
OUTPUT_FILE_EN_WORDS_COL = ''.join([DATA_DIR_OUT, 'en-words.col'])
'''
# Symbols to ignore (besides stopwords)
IGNORE_LIST = ['.', ',', ';', '(', ')', '-', ':', '!', '?', '\'']
suffixes = Suffixes(LANG_1, LANG_2)
# Module-level defaults for command-line control; overridden in
# handle_arguments() (which declares these names global).
single_language = None
use_treetagger = False
use_randomized_input = False
sentences_limit = SENTENCES_LIMIT # Assign default number
max_sentence_len = MAX_SENTENCE_LEN
min_pair_occ = MIN_PAIR_OCC
max_word_len = MAX_WORD_LEN
lang_1 = LANG_1 # Default lang 1
lang_2 = LANG_2 # Default lang 2
# Default path; should be changed in parameters.py file, or at least
# set by --treetagger-path parameter option.
treetagger_path = TREETAGGER_BASE_PATH
# Parallel (sentence-aligned) Europarl data.
# Input data obtained from: http://www.statmt.org/europarl/
europarl_files = {}
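# Shape of the mapping, e.g. (hypothetical paths; the real ones come
# from Suffixes.europarl_filepaths() and parameters.py):
#   {'de': 'data/in/de-en.de', 'en': 'data/in/de-en.en'}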
class AlignedSentences:
def __init__(self, sentences_1, sentences_2,
filter_sentences=False):
        '''
        XXX: Some sanity checking should be done here to ensure
        the numbers of sentences match.
        '''
        self.sentences_1 = sentences_1
        self.sentences_2 = sentences_2
        # Dict key order is not guaranteed in Python 2, so take the
        # highest sentence number explicitly.
        self.number_sentences = max(sentences_1.sentences)
self.pairs_combined = defaultdict(int)
self.no_sentences_filtered = 0
        # In parallel, throw out sentence pairs that are too long.
        if filter_sentences:
self._filter_sentences()
    def _filter_sentences(self):
        '''Filter sentence pairs by their length in tokens. The idea
        is not to compare overly long sentences with each other.
        '''
        for sentence_no, sentence in self.sentences_1.sentences.items():
            if len(sentence) > max_sentence_len or \
                    len(self.sentences_2.sentences[sentence_no]) > \
                    max_sentence_len:
                self.no_sentences_filtered += 1
                # Empty lists yield no combinations later on (a
                # placeholder token would end up being counted).
                self.sentences_1.sentences[sentence_no] = []
                self.sentences_2.sentences[sentence_no] = []
        print >> stderr, self.no_sentences_filtered, \
            "sentence pairs emptied because they exceeded", \
            str(max_sentence_len), "tokens."
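    # Counting sketch: each aligned sentence pair is concatenated into
    # one token list and every unordered 2-combination of tokens is
    # counted, so monolingual (e.g. de-de, en-en) and cross-lingual
    # (e.g. de-en) co-occurrences are collected in a single pass.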
def combine_words(self):
# Iterate through sentence numbers
for i in range(1, self.number_sentences + 1):
bilingual_sentence = self._get_bilingual_sentence(i)
            # Complexity: quadratic in the combined sentence length.
            pairs = combinations(bilingual_sentence, 2)
for pair in pairs:
self.pairs_combined[pair] += 1
print >> stderr, "Number of words combined:", \
len(self.pairs_combined)
def _write_sparse_matrix(self, output_file, lang):
"""Write out pairs in a sparse matrix format for DISSECT
cf.
http://clic.cimec.unitn.it/composes/toolkit/ex01input.html
"""
        with open(output_file, 'w') as f:
            if lang == lang_1:
                for pair, count in self.pairs_combined.items():
                    if count >= min_pair_occ:
                        # We only want lang_1-lang_1 and lang_1-lang_2
                        # combinations.
                        if ''.join(['_', lang_1]) in pair[0]:
                            f.write(''.join([pair[0], ' ', pair[1],
                                             ' ', str(count), '\n']))
            # Otherwise assume lang_2 is meant.
            else:
                for pair, count in self.pairs_combined.items():
                    if count >= min_pair_occ:
                        # We only want lang_2-lang_2 and lang_2-lang_1
                        # combinations.
                        if ''.join(['_', lang_2]) in pair[1]:
                            f.write(''.join([pair[1], ' ', pair[0],
                                             ' ', str(count), '\n']))
        print >> stderr, 'SM file written out:', output_file
def write_sparse_matrices(self):
""" Write sm matrices."""
# E. g. de-en
self._write_sparse_matrix(OUTPUT_FILE_DE_DE_EN_SM, lang_1)
# E. g. en-de
self._write_sparse_matrix(OUTPUT_FILE_EN_EN_DE_SM, lang_2)
def write_col(self):
"""Write out col of words (all words in both languages)
"""
col = set()
for pair, count in self.pairs_combined.items():
if count >= min_pair_occ:
col.add(pair[0])
col.add(pair[1])
        with open(OUTPUT_FILE_DE_EN_WORDS_COL, 'w') as f:
            for token in col:
                f.write(''.join([token, '\n']))
print >> stderr, \
'Col file written out:', \
OUTPUT_FILE_DE_EN_WORDS_COL
def write_row(self):
"""Write out row of words (language dependent each)
"""
row_1 = set()
row_2 = set()
        for pair, count in self.pairs_combined.items():
            if count >= min_pair_occ:
                # Sort the first element of the pair by language.
                if ''.join(['_', lang_1]) in pair[0]:
                    row_1.add(pair[0])
                else:
                    row_2.add(pair[0])
                # Sort the second element of the pair by language.
                if ''.join(['_', lang_1]) in pair[1]:
                    row_1.add(pair[1])
                else:
                    row_2.add(pair[1])
        with open(OUTPUT_FILE_DE_WORDS_ROW, 'w') as f:
            for token in row_1:
                f.write(''.join([token, '\n']))
        print >> stderr, 'Row file written out:', \
            OUTPUT_FILE_DE_WORDS_ROW
        with open(OUTPUT_FILE_EN_WORDS_ROW, 'w') as f:
            for token in row_2:
                f.write(''.join([token, '\n']))
print >> stderr, 'Row file written out:', \
OUTPUT_FILE_EN_WORDS_ROW
def write_pkl(self):
"""
Create spaces from co-occurrence counts in sparse format (.sm)
"""
        # For direction DE-EN
        my_space_1 = Space.build(data=OUTPUT_FILE_DE_DE_EN_SM,
                                 rows=OUTPUT_FILE_DE_WORDS_ROW,
                                 cols=OUTPUT_FILE_DE_EN_WORDS_COL,
                                 format="sm")
        # For direction EN-DE
        my_space_2 = Space.build(data=OUTPUT_FILE_EN_EN_DE_SM,
                                 rows=OUTPUT_FILE_EN_WORDS_ROW,
                                 cols=OUTPUT_FILE_DE_EN_WORDS_COL,
                                 format="sm")
# Save the space objects in pickle format
io_utils.save(my_space_1, OUTPUT_FILE_DE_DE_EN_PKL)
io_utils.save(my_space_2, OUTPUT_FILE_EN_EN_DE_PKL)
print >> stderr, 'Pickle file 1 written out:', \
OUTPUT_FILE_DE_DE_EN_PKL
print >> stderr, 'Pickle file 2 written out:', \
OUTPUT_FILE_EN_EN_DE_PKL
    def _get_bilingual_sentence(self, counter):
        """Return the two aligned sentences as one list of
        language-marked tokens."""
        marked_sentence_1 = self._mark_tokens_by_lang(
            self.sentences_1.sentences[counter], self.sentences_1.lang)
        marked_sentence_2 = self._mark_tokens_by_lang(
            self.sentences_2.sentences[counter], self.sentences_2.lang)
        return marked_sentence_1 + marked_sentence_2
def _mark_tokens_by_lang(self, tokens, lang):
"""Mark tokens with a language tag -- as suffix."""
marked_tokens = []
        # Only mark tokens here when TreeTagger wasn't used;
        # TreeTagger output is already marked in _process_sentence_tt().
        if not use_treetagger:
            for token in tokens:
                marked_tokens.append(token + '_' + lang)
        else:
            marked_tokens = tokens
return marked_tokens
class Sentences:
def __init__(self, lang):
self.lang = lang
self.sentences = {}
if use_treetagger:
self.treetagger = TreeTagger(TAGLANG=lang,
TAGDIR=treetagger_path,
TAGINENC=ENC,
TAGOUTENC=ENC)
def read_sentences(self):
"""Read in sentences from a file in a given language.
"""
with open(europarl_files[self.lang], 'r') as f:
i = 0
for sentence in f:
i += 1
# Show some progress
if self._is_sentence_to_print(i, 10):
print >> stderr, "Number of \'" + self.lang + \
"\' sentences processed:", str(i).rjust(9)
                # Process the sentence further (tokenization & filtering).
if use_treetagger:
self._process_sentence_tt(sentence.rstrip(), i)
else:
self._process_sentence(sentence.rstrip(), i)
                # Stop early when the sentences limit is reached.
                if sentences_limit == i:
break
print >> stderr, '=> Number of sentences \'' \
+ self.lang + '\' read in:', i
def _process_sentence(self, sentence, counter):
tokens = word_tokenize(sentence)
tokens_filtered = self._filter_tokens(tokens)
self.sentences[counter] = tokens_filtered
def _process_sentence_tt(self, sentence, counter):
"""Process sentence with Treetagger"""
        tokens_pos_tagged = []
        treetagger_tokens = self.treetagger.TagText(sentence)
        for token in treetagger_tokens:
            token_pos_tagged = token.split('\t')
            if len(token_pos_tagged) != 3:
                print >> stderr, \
                    "Caution -- broken TreeTagger case: ", \
                    token_pos_tagged, "(list)"
                continue  # Skip it
            pos_tag = getTag(token_pos_tagged[1], self.lang)
            token = token_pos_tagged[2].lower()
            # Those cases we don't want to keep.
            if token not in ["<unknown>", "@ord@", "@card@"] and \
                    pos_tag != NO_POS_SYM and len(token) <= max_word_len:
                token += '_' + pos_tag + '_' + self.lang
                tokens_pos_tagged.append(token)
self.sentences[counter] = tokens_pos_tagged
def _filter_tokens(self, tokens):
tokens_filtered = []
        for token in tokens:
            # Keep neither punctuation symbols nor stopwords.
            if token not in IGNORE_LIST and \
                    token.lower() not in \
                    stopwords.words(LONG_LANGTAG[self.lang]) and \
                    len(token) <= max_word_len:
                # Keep the token in lowercased form.
                tokens_filtered.append(token.lower())
return tokens_filtered
    def _is_sentence_to_print(self, counter, number):
        """Decide whether a progress message should be printed.
        counter: Number of sentences already processed.
        number: Print progress every `number` sentences.
        """
        return counter % number == 0
def create_folder(dir_location, problem_str):
    """
    Create a folder if it does not exist yet.
    @param dir_location: Concrete path to check and, if necessary,
                         create.
    @param problem_str: Message indicating that a folder is missing.
    """
if not exists(dir_location):
print >> stderr, problem_str + dir_location
makedirs(dir_location)
print >> stderr, "Now created."
def handle_arguments():
"""This function handles command-line options and arguments
provided."""
    # These variables are read and set globally.
global single_language, use_treetagger, sentences_limit, lang_1, \
lang_2, treetagger_path, max_sentence_len, min_pair_occ, \
max_word_len, use_randomized_input, europarl_files
    argparser = ArgumentParser(
        description='Create DISSECT input material.')
    argparser.add_argument('-s', '--single-language',
                           help="Create input material for the " + \
                                "specified language only.",
                           type=str)
argparser.add_argument('-t', '--use-treetagger',
help="Make sure TreeTagger is used for " + \
"lemmatization and PoS tagging.",
action="store_true")
argparser.add_argument('-ls', '--sentences-limit',
help="Make sure there's a (low) maximum " + \
"number of sentences to read in.",
type=int)
    argparser.add_argument('-tp', '--treetagger-path',
                           help="Specify the TreeTagger base " + \
                                "directory path (relative or " + \
                                "absolute).",
                           type=str)
argparser.add_argument('-ms', '--max-sentence-len',
help="Make sure no sentence considered " + \
"is longer than this number of " + \
"words.",
type=int)
argparser.add_argument('-mpo', '--min-pair-occ',
help="Define how often a pair should at " + \
"least occur to be considered.",
type=int)
    argparser.add_argument('-mw', '--max-word-len',
                           help="Specify how long a word may be " + \
                                "for it to be considered.",
                           type=int)
argparser.add_argument('-r', '--use-randomized-input',
help="This option looks for input data " + \
"with suffix '_rand' in the name " + \
"in order to use randomized input " + \
"lines.",
action="store_true")
argparser.add_argument('lang_1', nargs="?")
argparser.add_argument('lang_2', nargs="?")
pargs = argparser.parse_args()
    # User only wants a specific language, e.g. 'de' for German.
    if pargs.single_language:
        single_language = pargs.single_language.lower()
    else:
        single_language = None
# User can decide to use TreeTagger (for lemmatization and PoS
# tagging).
    if pargs.use_treetagger:
        use_treetagger = True
# Number of sentences we allow.
    if pargs.sentences_limit:
        sentences_limit = pargs.sentences_limit
    # Languages can be specified as arguments (either both or none).
    if pargs.lang_1:
        if pargs.lang_2:
            lang_1, lang_2 = pargs.lang_1, pargs.lang_2
        else:
            argparser.print_help()
            exit(2)
    # Check if a TreeTagger path was specified.
    if pargs.treetagger_path:
        treetagger_path = pargs.treetagger_path
        # Make sure there's a directory separator at the end.
        if treetagger_path[-1] != sep:
            treetagger_path += sep
# Check for limits (mins & maxes)
if (pargs.min_pair_occ):
min_pair_occ = pargs.min_pair_occ
if (pargs.max_sentence_len):
max_sentence_len = pargs.max_sentence_len
if (pargs.max_word_len):
max_word_len = pargs.max_word_len
if (pargs.use_randomized_input):
use_randomized_input = True
    filepaths = suffixes.europarl_filepaths(randfile=use_randomized_input)
    europarl_files = {
        lang_1: filepaths[0],
        lang_2: filepaths[1]
    }
    # Check if any input data is missing.
    for lang in europarl_files:
        if not exists(europarl_files[lang]):
            print >> stderr, 'Input data \'' + lang + \
                '\' is missing.' + \
                ' (Check location: ' + europarl_files[lang] + ')'
            exit(1)
def create_bilingual_input():
    """Creates input material for two languages (bilingual input
    material), which is useful for finding translation candidates
    between the two languages' words."""
# Read first language's sentences
sentences_1 = Sentences(lang_1)
sentences_1.read_sentences()
# Read second language's sentences
sentences_2 = Sentences(lang_2)
sentences_2.read_sentences()
# Combine words on basis of their sentences after filtering out
# long sentences.
aligned_sentences = AlignedSentences(sentences_1,
sentences_2,
filter_sentences=True)
aligned_sentences.combine_words()
# Write pairs in sparse matrix format
aligned_sentences.write_sparse_matrices()
# Write tokens to a col format
aligned_sentences.write_col()
# Write tokens to a row format
aligned_sentences.write_row()
# Write pickle files (for faster processing in
# besttranslations.py)
aligned_sentences.write_pkl()
def create_singlelang_input():
"""Creates input material for a single language, which can be
used to look for similarities between words."""
sentences = Sentences(single_language)
sentences.read_sentences()
# XXX: Still not functional.
def main():
# Handle command-line arguments and options.
handle_arguments()
    # Create input and output folders if they don't exist yet.
    create_folder(DATA_DIR_OUT, "Output dir does not exist: ")
    create_folder(DATA_DIR_IN, "Input dir does not exist: ")
# single_language is None if not used.
# Normal use is to work with two languages.
if not single_language:
create_bilingual_input()
else:
create_singlelang_input()
if __name__ == '__main__':
main()