def main_sweep(argv):
    parser = argparse.ArgumentParser(
        description='Sweep SA sample size and measure BLEU scores at various settings.')
    parser.add_argument('-e', '--engine', dest='engine',
                        help='the engine name, \'default\' will be used if absent', default=None)
    parser.add_argument('--path', dest='corpora_path', metavar='CORPORA', default=None,
                        help='the path to the test corpora (default is the automatically split sample)')
    args = parser.parse_args(argv)

    samples = [int(e) for e in '10 20 50 70 80 90 100 110 120 150 200 350 500 800 1000 2000 5000'.split()]

    node = ConfiguredClusterNode(args.engine)

    # more or less copy-pasted from mmt evaluate:
    evaluator = Evaluator(node.engine, node)

    corpora = ParallelCorpus.list(args.corpora_path) if args.corpora_path is not None \
        else ParallelCorpus.list(os.path.join(node.engine.data_path, TrainingPreprocessor.TEST_FOLDER_NAME))

    lines = 0
    for corpus in corpora:
        lines += corpus.count_lines()
    # end copy-paste

    print('sample bleu')

    for sample in samples:
        node.set('suffixarrays', 'sample', sample)
        node.apply_configs()

        scores = evaluator.evaluate(corpora=corpora, google_key='1234', heval_output=None,
                                    use_sessions=True, debug=False)

        engine_scores = scores['MMT']
        if isinstance(engine_scores, str):
            raise RuntimeError(engine_scores)

        bleu = engine_scores['bleu']
        print(sample, '%.2f' % (bleu * 100))
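# A minimal sketch of how main_sweep might be wired up as a command-line entry point.
# The __main__ guard below is an assumption for illustration, not part of the original
# module; it only relies on main_sweep expecting the argument list without the program
# name, as parser.parse_args(argv) suggests.
if __name__ == '__main__':
    import sys

    main_sweep(sys.argv[1:])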
def clean(self, corpora, dest_folder, langs=None):
    if langs is None and len(corpora) > 0:
        langs = (corpora[0].langs[0], corpora[0].langs[1])

    self._pool_exec(self._clean_file,
                    [(corpus, ParallelCorpus(corpus.name, dest_folder, corpus.langs), langs)
                     for corpus in corpora])
    return ParallelCorpus.list(dest_folder)
def encode(self, corpora, dest_folder):
    if not os.path.isdir(dest_folder):
        fileutils.makedirs(dest_folder, exist_ok=True)

    for corpus in corpora:
        for lang in corpus.langs:
            source = corpus.get_file(lang)
            dest = ParallelCorpus(corpus.name, dest_folder, [lang]).get_file(lang)

            self.encode_file(source, dest)

    return ParallelCorpus.list(dest_folder)
def process(self, corpora, dest_folder, print_tags=True, print_placeholders=False, original_spacing=False):
    for corpus in corpora:
        for lang in corpus.langs:
            source = corpus.get_file(lang)
            dest = ParallelCorpus(corpus.name, dest_folder, [lang]).get_file(lang)

            self.__process_file(source, dest, lang, print_tags, print_placeholders, original_spacing)

    return ParallelCorpus.list(dest_folder)
def process(self, source, target, input_paths, output_path, data_path=None):
    args = ['-s', source, '-t', target, '--output', output_path, '--input']

    for root in input_paths:
        args.append(root)

    if data_path is not None:
        args.append('--dev')
        args.append(os.path.join(data_path, TrainingPreprocessor.DEV_FOLDER_NAME))
        args.append('--test')
        args.append(os.path.join(data_path, TrainingPreprocessor.TEST_FOLDER_NAME))

    command = mmt_javamain(self._java_mainclass, args)
    shell.execute(command, stdin=shell.DEVNULL, stdout=shell.DEVNULL, stderr=shell.DEVNULL)

    return ParallelCorpus.splitlist(source, target, roots=output_path)
def translate(self, corpora, output):
    """
    Translate the given corpora in parallel processing fashion.
    :param corpora: list of ParallelCorpus
    :param output: path to output directory
    :return: ([ParallelCorpus, ...], time_per_sentence, parallelism)
    """
    pool = multithread.Pool(self._threads)

    try:
        translations = []
        start_time = datetime.now()

        for corpus in corpora:
            self._before_translate(corpus)

            with open(corpus.get_file(self.source_lang)) as source:
                output_path = os.path.join(output, corpus.name + '.' + self.target_lang)

                for line in source:
                    translation = pool.apply_async(self._get_translation, (line, corpus))
                    translations.append((translation, output_path))

            self._after_translate(corpus)

        elapsed_time = 0
        translation_count = 0

        path = None
        stream = None

        for translation_job, output_path in translations:
            translation, elapsed = translation_job.get()

            if output_path != path:
                if stream is not None:
                    stream.close()

                stream = open(output_path, 'wb')
                path = output_path

            stream.write(translation.encode('utf-8'))
            stream.write('\n')

            elapsed_time += elapsed
            translation_count += 1

        if stream is not None:
            stream.close()

        end_time = datetime.now()
        total_time = end_time - start_time

        return ParallelCorpus.list(output), (elapsed_time / translation_count), (
            elapsed_time / total_time.total_seconds())
    finally:
        pool.terminate()
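# Standalone arithmetic illustration of the two timing figures returned by translate()
# above (the numbers are made up for illustration, not taken from the original code):
# elapsed_time is the sum of per-sentence worker times, while the wall-clock duration
# is what start_time/end_time measure, so their ratio estimates effective parallelism.
elapsed_time = 80.0        # e.g. 1000 sentences * 0.08 s of worker time each
translation_count = 1000
wall_clock_seconds = 20.0  # e.g. measured duration with 4 worker threads

time_per_sentence = elapsed_time / translation_count  # 0.08 s per sentence
parallelism = elapsed_time / wall_clock_seconds        # 4.0 worker-seconds per wall-clock second

print('%.3f s/sentence, parallelism %.1fx' % (time_per_sentence, parallelism))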
def clean(self, source, target, input_paths, output_path):
    args = ['-s', source, '-t', target, '--output', output_path, '--input']

    for root in input_paths:
        args.append(root)

    command = mmt_javamain(self._java_mainclass, args)
    shell.execute(command, stdin=shell.DEVNULL, stdout=shell.DEVNULL, stderr=shell.DEVNULL)

    return ParallelCorpus.splitlist(source, target, roots=output_path)[0]
def train(self, corpora, aligner, working_dir='.', log_file=None):
    if os.path.isdir(self._model) and len(os.listdir(self._model)) > 0:
        raise Exception('Model already exists at ' + self._model)

    if not os.path.isdir(self._model):
        fileutils.makedirs(self._model, exist_ok=True)

    if not os.path.isdir(working_dir):
        fileutils.makedirs(working_dir, exist_ok=True)

    l1 = self._source_lang
    l2 = self._target_lang
    langs = (l1, l2)
    langs_suffix = l1 + '-' + l2

    mct_base = self._get_model_basename()
    dmp_file = mct_base + '.dmp'
    mam_file = mct_base + '.' + langs_suffix + '.mam'
    lex_file = mct_base + '.' + langs_suffix + '.lex'

    log = shell.DEVNULL

    try:
        if log_file is not None:
            log = open(log_file, 'a')

        # Clean corpus for training
        clean_output = os.path.join(working_dir, 'clean_corpora')
        fileutils.makedirs(clean_output, exist_ok=True)
        corpora = self._cleaner.clean(corpora, clean_output, (self._source_lang, self._target_lang))

        # Create merged corpus and domains list file (dmp)
        merged_corpus = ParallelCorpus(os.path.basename(mct_base), working_dir, langs)

        fileutils.merge([corpus.get_file(l1) for corpus in corpora], merged_corpus.get_file(l1))
        fileutils.merge([corpus.get_file(l2) for corpus in corpora], merged_corpus.get_file(l2))
        with open(dmp_file, 'w') as dmp:
            for corpus in corpora:
                dmp.write(str(corpus.name) + ' ' + str(corpus.count_lines()) + '\n')

        # Create alignments in 'bal' file and symmetrize
        bal_file = aligner.align(merged_corpus, langs, self._model, working_dir, log_file)

        symal_file = os.path.join(working_dir, 'alignments.' + langs_suffix + '.symal')
        symal_command = [self._symal_bin, '-a=g', '-d=yes', '-f=yes', '-b=yes']
        with open(bal_file) as stdin:
            with open(symal_file, 'w') as stdout:
                shell.execute(symal_command, stdin=stdin, stdout=stdout, stderr=log)

        # Execute mtt-build
        mttbuild_command = self._get_mttbuild_command(mct_base, dmp_file, l1)
        with open(merged_corpus.get_file(l1)) as stdin:
            shell.execute(mttbuild_command, stdin=stdin, stdout=log, stderr=log)

        mttbuild_command = self._get_mttbuild_command(mct_base, dmp_file, l2)
        with open(merged_corpus.get_file(l2)) as stdin:
            shell.execute(mttbuild_command, stdin=stdin, stdout=log, stderr=log)

        # Create 'mam' file
        mam_command = [self._symal2mam_bin, mam_file]
        with open(symal_file) as stdin:
            shell.execute(mam_command, stdin=stdin, stdout=log, stderr=log)

        # Create 'lex' file
        lex_command = [self._mmlexbuild_bin, mct_base + '.', l1, l2, '-o', lex_file]
        shell.execute(lex_command, stdout=log, stderr=log)
    finally:
        if log_file is not None:
            log.close()
def tune(self, corpora=None, tokenize=True, debug=False, context_enabled=True):
    if corpora is None:
        corpora = ParallelCorpus.list(os.path.join(self.engine.data_path, TrainingPreprocessor.DEV_FOLDER_NAME))

    if len(corpora) == 0:
        raise IllegalArgumentException('empty corpora')

    if not self.is_running():
        raise IllegalStateException('No MMT Server running, start the engine first')

    target_lang = self.engine.target_lang
    source_lang = self.engine.source_lang

    cmdlogger = _tuning_logger(4 if tokenize else 3)
    cmdlogger.start(self, corpora)

    working_dir = self.engine.get_tempdir('tuning')
    mert_wd = os.path.join(working_dir, 'mert')

    try:
        original_corpora = corpora

        # Tokenization
        tokenized_corpora = original_corpora

        if tokenize:
            tokenizer_output = os.path.join(working_dir, 'tokenized_corpora')
            fileutils.makedirs(tokenizer_output, exist_ok=True)

            with cmdlogger.step('Corpus tokenization') as _:
                tokenized_corpora = self.engine.preprocessor.process(corpora, tokenizer_output, print_tags=False,
                                                                     print_placeholders=True, original_spacing=False)

        # Create merged corpus
        with cmdlogger.step('Merging corpus') as _:
            source_merged_corpus = os.path.join(working_dir, 'corpus.' + source_lang)

            with open(source_merged_corpus, 'wb') as out:
                original_root = original_corpora[0].root

                for corpus in tokenized_corpora:
                    tokenized = corpus.get_file(source_lang)
                    original = os.path.join(original_root, corpus.name + '.' + source_lang)
                    out.write(tokenized + ':' + original + '\n')

            target_merged_corpus = os.path.join(working_dir, 'corpus.' + target_lang)
            fileutils.merge([corpus.get_file(target_lang) for corpus in tokenized_corpora], target_merged_corpus)

        # Run MERT algorithm
        with cmdlogger.step('Tuning') as _:
            # Start MERT
            decoder_flags = ['--port', str(self.api.port)]

            if not context_enabled:
                decoder_flags.append('--skip-context-analysis')
                decoder_flags.append('1')

            fileutils.makedirs(mert_wd, exist_ok=True)

            with tempfile.NamedTemporaryFile() as runtime_moses_ini:
                command = [self._mert_script, source_merged_corpus, target_merged_corpus, self._mert_i_script,
                           runtime_moses_ini.name, '--threads', str(multiprocessing.cpu_count()), '--mertdir',
                           os.path.join(Moses.bin_path, 'bin'), '--mertargs', '\'--binary --sctype BLEU\'',
                           '--working-dir', mert_wd, '--nbest', '100', '--decoder-flags',
                           '"' + ' '.join(decoder_flags) + '"', '--nonorm', '--closest', '--no-filter-phrase-table']

                with open(self.engine.get_logfile('mert'), 'wb') as log:
                    shell.execute(' '.join(command), stdout=log, stderr=log)

        # Read optimized configuration
        with cmdlogger.step('Applying changes') as _:
            bleu_score = 0
            weights = {}
            found_weights = False

            with open(os.path.join(mert_wd, 'moses.ini')) as moses_ini:
                for line in moses_ini:
                    line = line.strip()

                    if len(line) == 0:
                        continue
                    elif found_weights:
                        tokens = line.split()
                        weights[tokens[0].rstrip('=')] = [float(val) for val in tokens[1:]]
                    elif line.startswith('# BLEU'):
                        bleu_score = float(line.split()[2])
                    elif line == '[weight]':
                        found_weights = True

            _ = self.api.update_features(weights)

        cmdlogger.completed(bleu_score)
    finally:
        if not debug:
            self.engine.clear_tempdir()
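# Standalone sketch of the moses.ini parsing logic used in the 'Applying changes' step
# above; the sample_ini content is made up for illustration and is not a real MERT output.
sample_ini = """
# BLEU 0.3412 on dev set
[weight]
LM0= 0.5
TM0= 0.2 0.3 0.1
""".splitlines()

weights = {}
bleu_score = 0
found_weights = False

for line in sample_ini:
    line = line.strip()
    if len(line) == 0:
        continue
    elif found_weights:
        tokens = line.split()
        weights[tokens[0].rstrip('=')] = [float(val) for val in tokens[1:]]
    elif line.startswith('# BLEU'):
        bleu_score = float(line.split()[2])
    elif line == '[weight]':
        found_weights = True

print(bleu_score)  # 0.3412
print(weights)     # e.g. {'LM0': [0.5], 'TM0': [0.2, 0.3, 0.1]} (key order may vary)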
def build(self, roots, debug=False, steps=None, split_trainingset=True):
    self._temp_dir = self._engine.get_tempdir('training', ensure=True)

    source_lang = self._engine.source_lang
    target_lang = self._engine.target_lang

    bilingual_corpora, monolingual_corpora = ParallelCorpus.splitlist(source_lang, target_lang, roots=roots)

    if len(bilingual_corpora) == 0:
        raise IllegalArgumentException(
            'your project does not include %s-%s data.' % (source_lang.upper(), target_lang.upper()))

    if steps is None:
        steps = self._engine.training_steps
    else:
        unknown_steps = [step for step in steps if step not in self._engine.training_steps]
        if len(unknown_steps) > 0:
            raise IllegalArgumentException('Unknown training steps: ' + str(unknown_steps))

    cmdlogger = _builder_logger(len(steps) + 1)
    cmdlogger.start(self._engine, bilingual_corpora, monolingual_corpora)

    shutil.rmtree(self._engine.path, ignore_errors=True)
    os.makedirs(self._engine.path)

    # Check disk space constraints
    free_space_on_disk = fileutils.df(self._engine.path)[2]
    corpus_size_on_disk = 0
    for root in roots:
        corpus_size_on_disk += fileutils.du(root)
    free_memory = fileutils.free()

    recommended_mem = self.__GB * corpus_size_on_disk / (350 * self.__MB)  # 1G RAM every 350M on disk
    recommended_disk = 10 * corpus_size_on_disk

    if free_memory < recommended_mem or free_space_on_disk < recommended_disk:
        if free_memory < recommended_mem:
            print '> WARNING: more than %.fG of RAM recommended, only %.fG available' % \
                  (recommended_mem / self.__GB, free_memory / self.__GB)
        if free_space_on_disk < recommended_disk:
            print '> WARNING: more than %.fG of storage recommended, only %.fG available' % \
                  (recommended_disk / self.__GB, free_space_on_disk / self.__GB)
        print

    try:
        corpora_roots = roots
        unprocessed_bicorpora = bilingual_corpora
        unprocessed_mocorpora = monolingual_corpora

        # TM cleanup
        if 'tm_cleanup' in steps:
            with cmdlogger.step('TMs clean-up') as _:
                cleaned_output = self._get_tempdir('clean_tms')
                self._engine.cleaner.clean(source_lang, target_lang, roots, cleaned_output)

                for corpus in monolingual_corpora:
                    cfile = corpus.get_file(target_lang)
                    link = os.path.join(cleaned_output, os.path.basename(cfile))
                    os.symlink(cfile, link)

                corpora_roots = [cleaned_output]
                unprocessed_bicorpora, unprocessed_mocorpora = ParallelCorpus.splitlist(source_lang, target_lang,
                                                                                        roots=corpora_roots)

        # Preprocessing
        processed_bicorpora = unprocessed_bicorpora
        processed_mocorpora = unprocessed_mocorpora

        if 'preprocess' in steps:
            with cmdlogger.step('Corpora preprocessing') as _:
                preprocessor_output = self._get_tempdir('preprocessed')
                processed_bicorpora, processed_mocorpora = self._engine.training_preprocessor.process(
                    source_lang, target_lang, corpora_roots, preprocessor_output,
                    (self._engine.data_path if split_trainingset else None))

        # Training Context Analyzer
        if 'context_analyzer' in steps:
            with cmdlogger.step('Context Analyzer training') as _:
                log_file = self._engine.get_logfile('training.context')
                self._engine.analyzer.create_index(unprocessed_bicorpora, source_lang, log_file=log_file)

        # Training Adaptive Language Model (on the target side of all bilingual corpora)
        if 'lm' in steps:
            with cmdlogger.step('Language Model training') as _:
                working_dir = self._get_tempdir('lm')
                log_file = self._engine.get_logfile('training.lm')
                self._engine.lm.train(processed_bicorpora + processed_mocorpora, target_lang, working_dir, log_file)

        # Training Translation Model
        if 'tm' in steps:
            with cmdlogger.step('Translation Model training') as _:
                working_dir = self._get_tempdir('tm')
                log_file = self._engine.get_logfile('training.tm')
                self._engine.pt.train(processed_bicorpora, self._engine.aligner, working_dir, log_file)

        # Writing config file
        with cmdlogger.step('Writing config files') as _:
            self._engine.write_configs()

        cmdlogger.completed()
    finally:
        if not debug:
            self._engine.clear_tempdir('training')
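# Worked example of the resource heuristics used in build() above (the corpus size is
# illustrative; GB and MB stand in for the private self.__GB / self.__MB constants,
# assumed here to be the usual byte multipliers).
GB = 1024 ** 3
MB = 1024 ** 2

corpus_size_on_disk = 3.5 * GB                             # illustrative corpus size
recommended_mem = GB * corpus_size_on_disk / (350 * MB)    # ~10.24 GB: 1 GB RAM per 350 MB on disk
recommended_disk = 10 * corpus_size_on_disk                # 35 GB of free storage

print('%.1fG RAM, %.1fG disk recommended' % (recommended_mem / GB, recommended_disk / GB))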