def train(self, corpora, lang, working_dir='.', log_file=None):
    LanguageModel.train(self, corpora, lang, working_dir, log_file)

    log = shell.DEVNULL

    try:
        if log_file is not None:
            log = open(log_file, 'w')

        # Collapse all corpora into a single text file
        merged_corpus = os.path.join(working_dir, 'merge')
        fileutils.merge([corpus.get_file(lang) for corpus in corpora], merged_corpus)

        input_se = os.path.join(working_dir, 'static_input.se')
        temp = os.path.join(working_dir, 'temp')
        arpa_file = os.path.join(working_dir, 'static_lm.arpa')

        # Add start and end symbols
        with open(merged_corpus) as stdin:
            with open(input_se, 'w') as stdout:
                shell.execute([self._addbound_bin], stdin=stdin, stdout=stdout, stderr=log)

        # Create LM in ARPA format
        command = [self._buildlm_bin,
                   '-i', input_se,
                   '-k', str(cpu_count()),
                   '-o', arpa_file,
                   '-n', str(self._order),
                   '-s', 'witten-bell',
                   '-t', temp,
                   '-l', '/dev/stdout',
                   '-irstlm', self._irstlm_dir,
                   '--PruneSingletons']
        shell.execute(command, stderr=log)

        # Create binary LM
        command = [self._compilelm_bin, arpa_file + '.gz', self._model]
        shell.execute(command, stderr=log)
    finally:
        if log_file is not None:
            log.close()
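
# A standalone sketch of the same IRSTLM pipeline, using subprocess directly
# instead of the shell/fileutils helpers above. The binary names
# ('add-start-end.sh', 'build-lm.sh', 'compile-lm') and the install layout are
# assumptions: the method above resolves them through self._addbound_bin,
# self._buildlm_bin and self._compilelm_bin instead.
import os
import subprocess
from multiprocessing import cpu_count


def _build_static_lm_sketch(merged_corpus, model, order, irstlm_dir, working_dir='.'):
    bin_dir = os.path.join(irstlm_dir, 'bin')
    input_se = os.path.join(working_dir, 'static_input.se')
    temp = os.path.join(working_dir, 'temp')
    arpa_file = os.path.join(working_dir, 'static_lm.arpa')

    # 1) Wrap every sentence with start/end boundary symbols
    with open(merged_corpus) as stdin:
        with open(input_se, 'w') as stdout:
            subprocess.check_call([os.path.join(bin_dir, 'add-start-end.sh')],
                                  stdin=stdin, stdout=stdout)

    # 2) Estimate a Witten-Bell smoothed ARPA model (build-lm.sh gzips its output)
    subprocess.check_call([os.path.join(bin_dir, 'build-lm.sh'),
                           '-i', input_se, '-k', str(cpu_count()), '-o', arpa_file,
                           '-n', str(order), '-s', 'witten-bell', '-t', temp,
                           '-l', '/dev/stdout', '-irstlm', irstlm_dir, '--PruneSingletons'])

    # 3) Compile the gzipped ARPA file into IRSTLM's binary format
    subprocess.check_call([os.path.join(bin_dir, 'compile-lm'), arpa_file + '.gz', model])
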
def train(self, corpora, lang, working_dir='.', log_file=None):
    LanguageModel.train(self, corpora, lang, working_dir, log_file)

    log = shell.DEVNULL

    try:
        if log_file is not None:
            log = open(log_file, 'w') if isinstance(log_file, str) else log_file

        # Collapse all corpora into a single text file
        merged_corpus = os.path.join(working_dir, 'merge')
        fileutils.merge([corpus.get_file(lang) for corpus in corpora], merged_corpus)

        # Create language model in ARPA format
        arpa_file = os.path.join(working_dir, 'lm.arpa')
        arpa_command = [self._lmplz_bin,
                        '--discount_fallback',
                        '-o', str(self._order),
                        '-S', str(self.get_mem_percent()) + '%',
                        '-T', working_dir]
        if self._order > 2 and self.prune:
            arpa_command += ['--prune', '0', '0', '1']

        with open(merged_corpus) as stdin:
            with open(arpa_file, 'w') as stdout:
                shell.execute(arpa_command, stdin=stdin, stdout=stdout, stderr=log)

        # Binarize ARPA file
        binarize_command = [self._bbinary_bin, arpa_file, self._model]
        shell.execute(binarize_command, stdout=log, stderr=log)
    finally:
        if log_file is not None and isinstance(log_file, str):
            log.close()
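
# For reference, with order=5 and pruning enabled the method above runs the
# equivalent of the following KenLM pipeline (assuming _lmplz_bin and
# _bbinary_bin point at KenLM's 'lmplz' and 'build_binary'):
#
#   lmplz --discount_fallback -o 5 -S <mem>% -T <working_dir> --prune 0 0 1 \
#       < <working_dir>/merge > <working_dir>/lm.arpa
#   build_binary <working_dir>/lm.arpa <model>
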
def train(self, corpora, aligner, working_dir='.', log_file=None):
    if os.path.isdir(self._model) and len(os.listdir(self._model)) > 0:
        raise Exception('Model already exists at ' + self._model)

    if not os.path.isdir(self._model):
        fileutils.makedirs(self._model, exist_ok=True)

    if not os.path.isdir(working_dir):
        fileutils.makedirs(working_dir, exist_ok=True)

    l1 = self._source_lang
    l2 = self._target_lang
    langs = (l1, l2)
    langs_suffix = l1 + '-' + l2

    mct_base = self._get_model_basename()
    dmp_file = mct_base + '.dmp'
    mam_file = mct_base + '.' + langs_suffix + '.mam'
    lex_file = mct_base + '.' + langs_suffix + '.lex'

    log = shell.DEVNULL

    try:
        if log_file is not None:
            log = open(log_file, 'a')

        # Clean corpus for training
        clean_output = os.path.join(working_dir, 'clean_corpora')
        fileutils.makedirs(clean_output, exist_ok=True)
        corpora = self._cleaner.clean(corpora, clean_output, (self._source_lang, self._target_lang))

        # Create merged corpus and domains list file (dmp)
        merged_corpus = ParallelCorpus(os.path.basename(mct_base), working_dir, langs)
        fileutils.merge([corpus.get_file(l1) for corpus in corpora], merged_corpus.get_file(l1))
        fileutils.merge([corpus.get_file(l2) for corpus in corpora], merged_corpus.get_file(l2))

        with open(dmp_file, 'w') as dmp:
            for corpus in corpora:
                dmp.write(str(corpus.name) + ' ' + str(corpus.count_lines()) + '\n')

        # Create alignments in 'bal' file and symmetrize
        bal_file = aligner.align(merged_corpus, langs, self._model, working_dir, log_file)

        symal_file = os.path.join(working_dir, 'alignments.' + langs_suffix + '.symal')
        symal_command = [self._symal_bin, '-a=g', '-d=yes', '-f=yes', '-b=yes']
        with open(bal_file) as stdin:
            with open(symal_file, 'w') as stdout:
                shell.execute(symal_command, stdin=stdin, stdout=stdout, stderr=log)

        # Execute mtt-build
        mttbuild_command = self._get_mttbuild_command(mct_base, dmp_file, l1)
        with open(merged_corpus.get_file(l1)) as stdin:
            shell.execute(mttbuild_command, stdin=stdin, stdout=log, stderr=log)

        mttbuild_command = self._get_mttbuild_command(mct_base, dmp_file, l2)
        with open(merged_corpus.get_file(l2)) as stdin:
            shell.execute(mttbuild_command, stdin=stdin, stdout=log, stderr=log)

        # Create 'mam' file
        mam_command = [self._symal2mam_bin, mam_file]
        with open(symal_file) as stdin:
            shell.execute(mam_command, stdin=stdin, stdout=log, stderr=log)

        # Create 'lex' file
        lex_command = [self._mmlexbuild_bin, mct_base + '.', l1, l2, '-o', lex_file]
        shell.execute(lex_command, stdout=log, stderr=log)
    finally:
        if log_file is not None:
            log.close()
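
# The '.dmp' domain-map file written above contains one '<name> <line count>'
# entry per cleaned corpus, e.g. (corpus names and counts are illustrative):
#
#   europarl 1920209
#   ted 150000
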
def tune(self, corpora=None, tokenize=True, debug=False, context_enabled=True):
    if corpora is None:
        corpora = ParallelCorpus.list(os.path.join(self.engine.data_path, TrainingPreprocessor.DEV_FOLDER_NAME))

    if len(corpora) == 0:
        raise IllegalArgumentException('empty corpora')

    if not self.is_running():
        raise IllegalStateException('No MMT Server running, start the engine first')

    target_lang = self.engine.target_lang
    source_lang = self.engine.source_lang

    cmdlogger = _tuning_logger(4 if tokenize else 3)
    cmdlogger.start(self, corpora)

    working_dir = self.engine.get_tempdir('tuning')
    mert_wd = os.path.join(working_dir, 'mert')

    try:
        original_corpora = corpora

        # Tokenization
        tokenized_corpora = original_corpora

        if tokenize:
            tokenizer_output = os.path.join(working_dir, 'tokenized_corpora')
            fileutils.makedirs(tokenizer_output, exist_ok=True)

            with cmdlogger.step('Corpus tokenization') as _:
                tokenized_corpora = self.engine.preprocessor.process(corpora, tokenizer_output, print_tags=False,
                                                                     print_placeholders=True, original_spacing=False)

        # Create merged corpus
        with cmdlogger.step('Merging corpus') as _:
            source_merged_corpus = os.path.join(working_dir, 'corpus.' + source_lang)

            with open(source_merged_corpus, 'wb') as out:
                original_root = original_corpora[0].root

                for corpus in tokenized_corpora:
                    tokenized = corpus.get_file(source_lang)
                    original = os.path.join(original_root, corpus.name + '.' + source_lang)
                    out.write(tokenized + ':' + original + '\n')

            target_merged_corpus = os.path.join(working_dir, 'corpus.' + target_lang)
            fileutils.merge([corpus.get_file(target_lang) for corpus in tokenized_corpora], target_merged_corpus)

        # Run MERT algorithm
        with cmdlogger.step('Tuning') as _:
            # Start MERT
            decoder_flags = ['--port', str(self.api.port)]

            if not context_enabled:
                decoder_flags.append('--skip-context-analysis')
                decoder_flags.append('1')

            fileutils.makedirs(mert_wd, exist_ok=True)

            with tempfile.NamedTemporaryFile() as runtime_moses_ini:
                command = [self._mert_script, source_merged_corpus, target_merged_corpus,
                           self._mert_i_script, runtime_moses_ini.name,
                           '--threads', str(multiprocessing.cpu_count()),
                           '--mertdir', os.path.join(Moses.bin_path, 'bin'),
                           '--mertargs', '\'--binary --sctype BLEU\'',
                           '--working-dir', mert_wd,
                           '--nbest', '100',
                           '--decoder-flags', '"' + ' '.join(decoder_flags) + '"',
                           '--nonorm', '--closest', '--no-filter-phrase-table']

                with open(self.engine.get_logfile('mert'), 'wb') as log:
                    shell.execute(' '.join(command), stdout=log, stderr=log)

        # Read optimized configuration
        with cmdlogger.step('Applying changes') as _:
            bleu_score = 0
            weights = {}
            found_weights = False

            with open(os.path.join(mert_wd, 'moses.ini')) as moses_ini:
                for line in moses_ini:
                    line = line.strip()

                    if len(line) == 0:
                        continue
                    elif found_weights:
                        tokens = line.split()
                        weights[tokens[0].rstrip('=')] = [float(val) for val in tokens[1:]]
                    elif line.startswith('# BLEU'):
                        bleu_score = float(line.split()[2])
                    elif line == '[weight]':
                        found_weights = True

            _ = self.api.update_features(weights)

        cmdlogger.completed(bleu_score)
    finally:
        if not debug:
            self.engine.clear_tempdir()
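
# The 'Applying changes' step above parses the MERT-optimized moses.ini, whose
# relevant fragment looks like this (feature names and values are illustrative,
# not taken from a real run):
#
#   # BLEU 0.2764
#   [weight]
#   LM0= 0.5
#   Distortion0= 0.3 0.2
#
# Every non-empty line after '[weight]' becomes an entry in the 'weights' dict
# ('LM0' -> [0.5], 'Distortion0' -> [0.3, 0.2]), which is then pushed to the
# running decoder via self.api.update_features(weights).
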
def evaluate(self, corpora, heval_output=None, debug=False):
    if len(corpora) == 0:
        raise IllegalArgumentException('empty corpora')

    if heval_output is not None:
        fileutils.makedirs(heval_output, exist_ok=True)

    target_lang = self._engine.target_lang
    source_lang = self._engine.source_lang

    logger = _evaluate_logger()
    logger.start(corpora)

    working_dir = self._engine.get_tempdir('evaluation')

    try:
        results = []

        # Process references
        with logger.step('Preparing corpora') as _:
            corpora_path = os.path.join(working_dir, 'corpora')
            corpora = self._xmlencoder.encode(corpora, corpora_path)

            reference = os.path.join(working_dir, 'reference.' + target_lang)
            source = os.path.join(working_dir, 'source.' + source_lang)
            fileutils.merge([corpus.get_file(target_lang) for corpus in corpora], reference)
            fileutils.merge([corpus.get_file(source_lang) for corpus in corpora], source)

            if heval_output is not None:
                self._heval_outputter.write(lang=target_lang, input_file=reference,
                                            output_file=os.path.join(heval_output, 'reference.' + target_lang))
                self._heval_outputter.write(lang=source_lang, input_file=source,
                                            output_file=os.path.join(heval_output, 'source.' + source_lang))

        # Translate
        for translator in self._translators:
            name = translator.name()

            with logger.step('Translating with %s' % name) as _:
                result = _EvaluationResult(translator)
                results.append(result)

                translations_path = os.path.join(working_dir, 'translations', result.id + '.raw')
                xmltranslations_path = os.path.join(working_dir, 'translations', result.id)
                fileutils.makedirs(translations_path, exist_ok=True)

                try:
                    translated, mtt, parallelism = translator.translate(corpora, translations_path)
                    filename = result.id + '.' + target_lang

                    result.mtt = mtt
                    result.parallelism = parallelism
                    result.translated_corpora = self._xmlencoder.encode(translated, xmltranslations_path)
                    result.merge = os.path.join(working_dir, filename)

                    fileutils.merge([corpus.get_file(target_lang) for corpus in result.translated_corpora],
                                    result.merge)

                    if heval_output is not None:
                        self._heval_outputter.write(lang=target_lang, input_file=result.merge,
                                                    output_file=os.path.join(heval_output, filename))
                except TranslateError as e:
                    result.error = e
                except Exception as e:
                    result.error = TranslateError('Unexpected ERROR: ' + str(e.message))

        # Scoring
        scorers = [(MatecatScore(), 'pes'), (BLEUScore(), 'bleu')]

        for scorer, field in scorers:
            with logger.step('Calculating %s' % scorer.name()) as _:
                for result in results:
                    if result.error is not None:
                        continue
                    setattr(result, field, scorer.calculate(result.merge, reference))

        logger.completed(results, scorers)
    finally:
        if not debug:
            self._engine.clear_tempdir('evaluation')
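
# Hypothetical usage sketch (the evaluator construction and its translators
# are assumed, not shown in this file):
#
#   corpora = ParallelCorpus.list('/path/to/test_set')
#   evaluator.evaluate(corpora, heval_output='/tmp/heval')
#
# Each translator produces an _EvaluationResult carrying its merged output,
# which is scored against the shared reference once per scorer: the Matecat
# score fills the 'pes' field and BLEUScore fills the 'bleu' field.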