Example #1
    def __init__(self, models, dataset, params, params_prediction, params_training, model_tokenize_f, model_detokenize_f, general_tokenize_f,
                 general_detokenize_f, mapping=None, word2index_x=None, word2index_y=None, index2word_y=None,
                 excluded_words=None, unk_id=1, eos_symbol='/', online=False, verbose=0):
        self.models = models
        self.dataset = dataset
        self.params = params
        self.params_prediction = params_prediction
        self.params_training = params_training
        self.model_tokenize_f = model_tokenize_f
        self.model_detokenize_f = model_detokenize_f
        self.general_tokenize_f = general_tokenize_f
        self.general_detokenize_f = general_detokenize_f
        self.mapping = mapping
        self.excluded_words = excluded_words
        self.verbose = verbose
        self.eos_symbol = eos_symbol
        self.word2index_x = word2index_x if word2index_x is not None else \
            dataset.vocabulary[params_prediction['INPUTS_IDS_DATASET'][0]]['words2idx']
        self.index2word_y = index2word_y if index2word_y is not None else \
            dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['idx2words']
        self.word2index_y = word2index_y if word2index_y is not None else \
            dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['words2idx']
        self.unk_id = unk_id
        self.interactive_beam_searcher = InteractiveBeamSearchSampler(self.models,
                                                                      self.dataset,
                                                                      self.params_prediction,
                                                                      excluded_words=self.excluded_words,
                                                                      verbose=self.verbose)

        # Compile Theano sampling function by generating a fake sample.
        # TODO: Find a better way of doing this
        logger.info('Compiling sampler...')
        self.generate_sample('i')
        logger.info('Done.')

        self.online = online
        if self.online:
            self.online_trainer = OnlineTrainer(self.models, self.dataset, None,  # Sampler
                                                None,  # Params prediction
                                                params_training,
                                                verbose=self.verbose)
            for i, nmt_model in enumerate(self.models):
                logger.info('Compiling model %d...' % i)
                nmt_model.model._make_train_function()
            logger.info('Done.')

        else:
            self.online_trainer = None
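
A minimal usage sketch for this constructor. This is not part of the original code: the tokenization callables and the loading of `models`, `dataset` and the parameter dicts are assumptions here (names are illustrative; the later examples show how they are typically built):

# Hedged sketch: wiring an NMTSampler for prefix-constrained decoding.
# All inputs are assumed to be loaded elsewhere (see the server examples below).
sampler = NMTSampler(models, dataset, params, params_prediction, params_training,
                     model_tokenize_f=dataset.tokenize_bpe,        # e.g. BPE
                     model_detokenize_f=dataset.detokenize_bpe,
                     general_tokenize_f=dataset.tokenize_moses,    # e.g. Moses
                     general_detokenize_f=dataset.detokenize_moses,
                     online=False,
                     verbose=1)

# First hypothesis for a source sentence.
hyp = sampler.generate_sample('some source sentence')

# Resample, constrained on a user-validated prefix.
hyp = sampler.generate_sample('some source sentence', validated_prefix='Some target prefix ')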
Example #2
    def __init__(self,
                 models,
                 dataset,
                 params,
                 params_prediction,
                 params_training,
                 model_tokenize_f,
                 model_detokenize_f,
                 general_tokenize_f,
                 general_detokenize_f,
                 mapping=None,
                 word2index_x=None,
                 word2index_y=None,
                 index2word_y=None,
                 excluded_words=None,
                 unk_id=1,
                 eos_symbol='/',
                 online=False,
                 verbose=0):
        """
        Builds an NMTSampler: an object containing the models and dataset used by the interactive-predictive and adaptive framework.
        :param models: Models to sample from (e.g. an ensemble of NMT models).
        :param dataset: Dataset instance holding the vocabularies and data.
        :param dict params: All hyperparameters of the model.
        :param dict params_prediction: Hyperparameters regarding prediction and search.
        :param dict params_training: Hyperparameters regarding incremental training.
        :param function model_tokenize_f: Function used for tokenizing the input sentence. E.g. BPE.
        :param function model_detokenize_f: Function used for detokenizing the output sentence. E.g. BPE revert.
        :param function general_tokenize_f: Function used for tokenizing the input sentence. E.g. Moses tokenizer.
        :param function general_detokenize_f: Function used for detokenizing the output sentence. E.g. Moses detokenizer.
        :param dict mapping: Source-target dictionary (for unk_replace heuristics 1 and 2).
        :param dict word2index_x: Mapping from word strings into indices for the source language.
        :param dict word2index_y: Mapping from word strings into indices for the target language.
        :param dict index2word_y: Mapping from indices into word strings for the target language.
        :param dict excluded_words: Words that won't be generated in the middle of two isles. Currently unused.
        :param int unk_id: Unknown word index.
        :param str eos_symbol: End-of-sentence symbol.
        :param bool online: Whether to apply online learning after accepting each hypothesis.
        :param int verbose: Verbosity level.
        """

        self.models = models
        self.dataset = dataset
        self.params = params
        self.params_prediction = params_prediction
        self.params_training = params_training
        self.model_tokenize_f = model_tokenize_f
        self.model_detokenize_f = model_detokenize_f
        self.general_tokenize_f = general_tokenize_f
        self.general_detokenize_f = general_detokenize_f
        self.mapping = mapping
        self.excluded_words = excluded_words
        self.verbose = verbose
        self.eos_symbol = eos_symbol
        self.word2index_x = word2index_x if word2index_x is not None else \
            dataset.vocabulary[params_prediction['INPUTS_IDS_DATASET'][0]]['words2idx']
        self.index2word_y = index2word_y if index2word_y is not None else \
            dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['idx2words']
        self.word2index_y = word2index_y if word2index_y is not None else \
            dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['words2idx']
        self.unk_id = unk_id

        self.interactive_beam_searcher = InteractiveBeamSearchSampler(
            self.models,
            self.dataset,
            self.params_prediction,
            excluded_words=self.excluded_words,
            verbose=self.verbose)

        # Compile sampling function by generating a fake sample.
        # TODO: Find a better way of doing this
        logger.info('Compiling sampler...')
        self.generate_sample('i')
        logger.info('Done.')

        self.online = online
        if self.online:
            self.online_trainer = OnlineTrainer(
                self.models,
                self.dataset,
                None,  # Sampler
                None,  # Params prediction
                params_training,
                verbose=self.verbose)
            for i, nmt_model in enumerate(self.models):
                logger.info('Compiling model %d...' % i)
                nmt_model.model._make_train_function()
            logger.info('Done.')

        else:
            self.online_trainer = None
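
When `word2index_x`, `word2index_y` and `index2word_y` are omitted, the constructor falls back to the dataset's own vocabularies. The explicit equivalent of those defaults (a sketch restating the fallback above):

# Explicit form of the vocabulary defaults used by __init__.
src_id = params_prediction['INPUTS_IDS_DATASET'][0]
trg_id = params_prediction['OUTPUTS_IDS_DATASET'][0]
word2index_x = dataset.vocabulary[src_id]['words2idx']
word2index_y = dataset.vocabulary[trg_id]['words2idx']
index2word_y = dataset.vocabulary[trg_id]['idx2words']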
Example #3
class NMTSampler:
    def __init__(self,
                 models,
                 dataset,
                 params,
                 params_prediction,
                 params_training,
                 model_tokenize_f,
                 model_detokenize_f,
                 general_tokenize_f,
                 general_detokenize_f,
                 mapping=None,
                 word2index_x=None,
                 word2index_y=None,
                 index2word_y=None,
                 excluded_words=None,
                 unk_id=1,
                 eos_symbol='/',
                 online=False,
                 verbose=0):
        """
        Builds an NMTSampler: an object containing the models and dataset used by the interactive-predictive and adaptive framework.
        :param models: Models to sample from (e.g. an ensemble of NMT models).
        :param dataset: Dataset instance holding the vocabularies and data.
        :param dict params: All hyperparameters of the model.
        :param dict params_prediction: Hyperparameters regarding prediction and search.
        :param dict params_training: Hyperparameters regarding incremental training.
        :param function model_tokenize_f: Function used for tokenizing the input sentence. E.g. BPE.
        :param function model_detokenize_f: Function used for detokenizing the output sentence. E.g. BPE revert.
        :param function general_tokenize_f: Function used for tokenizing the input sentence. E.g. Moses tokenizer.
        :param function general_detokenize_f: Function used for detokenizing the output sentence. E.g. Moses detokenizer.
        :param dict mapping: Source-target dictionary (for unk_replace heuristics 1 and 2).
        :param dict word2index_x: Mapping from word strings into indices for the source language.
        :param dict word2index_y: Mapping from word strings into indices for the target language.
        :param dict index2word_y: Mapping from indices into word strings for the target language.
        :param dict excluded_words: Words that won't be generated in the middle of two isles. Currently unused.
        :param int unk_id: Unknown word index.
        :param str eos_symbol: End-of-sentence symbol.
        :param bool online: Whether to apply online learning after accepting each hypothesis.
        :param int verbose: Verbosity level.
        """

        self.models = models
        self.dataset = dataset
        self.params = params
        self.params_prediction = params_prediction
        self.params_training = params_training
        self.model_tokenize_f = model_tokenize_f
        self.model_detokenize_f = model_detokenize_f
        self.general_tokenize_f = general_tokenize_f
        self.general_detokenize_f = general_detokenize_f
        self.mapping = mapping
        self.excluded_words = excluded_words
        self.verbose = verbose
        self.eos_symbol = eos_symbol
        self.word2index_x = word2index_x if word2index_x is not None else \
            dataset.vocabulary[params_prediction['INPUTS_IDS_DATASET'][0]]['words2idx']
        self.index2word_y = index2word_y if index2word_y is not None else \
            dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['idx2words']
        self.word2index_y = word2index_y if word2index_y is not None else \
            dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['words2idx']
        self.unk_id = unk_id

        self.interactive_beam_searcher = InteractiveBeamSearchSampler(
            self.models,
            self.dataset,
            self.params_prediction,
            excluded_words=self.excluded_words,
            verbose=self.verbose)

        # Compile sampling function by generating a fake sample.
        # TODO: Find a better way of doing this
        logger.info('Compiling sampler...')
        self.generate_sample('i')
        logger.info('Done.')

        self.online = online
        if self.online:
            self.online_trainer = OnlineTrainer(
                self.models,
                self.dataset,
                None,  # Sampler
                None,  # Params prediction
                params_training,
                verbose=self.verbose)
            for i, nmt_model in enumerate(self.models):
                logger.info('Compiling model %d...' % i)
                nmt_model.model._make_train_function()
            logger.info('Done.')

        else:
            self.online_trainer = None

    def generate_sample(self,
                        source_sentence,
                        validated_prefix=None,
                        max_N=5,
                        isle_indices=None,
                        filtered_idx2word=None,
                        unk_indices=None,
                        unk_words=None):
        """
        Generate a sample via constrained search. Options labeled with <<isles>> are untested
        and likely require some modifications to work correctly.
        :param source_sentence: Source sentence.
        :param validated_prefix: Prefix to keep in the output.
        :param max_N: Maximum number of words to generate between validated segments. <<isles>>
        :param isle_indices: Indices of the validated segments. <<isles>>
        :param filtered_idx2word: Dictionary (index -> word) of candidate words allowed as the next word to generate (after generating fixed_words).
        :param unk_indices: Positions of the unknown words.
        :param unk_words: Unknown words.
        :return:
        """
        logger.log(2, 'Beam size: %d' % (self.params_prediction['beam_size']))
        generate_sample_start_time = time.time()
        if unk_indices is None:
            unk_indices = []
        if unk_words is None:
            unk_words = []

        tokenization_start_time = time.time()
        tokenized_input = self.general_tokenize_f(source_sentence,
                                                  escape=False)
        tokenized_input = self.model_tokenize_f(tokenized_input)
        tokenization_end_time = time.time()
        logger.log(
            2, 'tokenization time: %.6f' %
            (tokenization_end_time - tokenization_start_time))
        parse_input_start_time = time.time()
        # Go from text to indices
        src_seq = self.dataset.loadText(
            [tokenized_input],
            vocabularies=self.dataset.vocabulary[
                self.params['INPUTS_IDS_DATASET'][0]],
            max_len=self.params['MAX_INPUT_TEXT_LEN'],
            offset=0,
            fill=self.dataset.fill_text[self.params['INPUTS_IDS_DATASET'][0]],
            pad_on_batch=self.dataset.pad_on_batch[
                self.params['INPUTS_IDS_DATASET'][0]],
            words_so_far=False,
            loading_X=True)[0][0]

        parse_input_end_time = time.time()
        logger.log(
            2, 'parse_input time: %.6f' %
            (parse_input_end_time - parse_input_start_time))

        fixed_words_user = OrderedDict()
        unk_words_dict = OrderedDict()
        # If the user provided some feedback...
        if validated_prefix is not None:
            next_correction = validated_prefix[-1]
            if next_correction == self.eos_symbol:
                return validated_prefix[:-1].decode('utf-8')

            # 2.2.4 Tokenize the prefix properly (possibly applying BPE)
            #  TODO: Here we are tokenizing the target language with the source language tokenizer
            prefix_tokenization_start_time = time.time()
            tokenized_validated_prefix = self.general_tokenize_f(
                validated_prefix, escape=False)
            tokenized_validated_prefix = self.model_tokenize_f(
                tokenized_validated_prefix)
            prefix_tokenization_end_time = time.time()
            logger.log(
                2, 'prefix_tokenization time: %.6f' %
                (prefix_tokenization_end_time -
                 prefix_tokenization_start_time))

            # 2.2.5 Validate words
            word_validation_start_time = time.time()
            for pos, word in enumerate(tokenized_validated_prefix.split()):
                fixed_words_user[pos] = self.word2index_y.get(
                    word, self.unk_id)
                if self.word2index_y.get(word) is None:
                    unk_words_dict[pos] = word
            word_validation_end_time = time.time()
            logger.log(
                2, 'word_validation time: %.6f' %
                (word_validation_end_time - word_validation_start_time))

            # 2.2.6 Constrain search for the last word
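            # If the last typed character is not a space, the user is still in
            # the middle of a word: collect all vocabulary words starting with
            # that partial word as candidates and unfix the position, so the
            # beam search is free to complete it.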
            constrain_search_start_time = time.time()
            last_user_word_pos = list(fixed_words_user.keys())[-1]
            if next_correction != u' ':
                last_user_word = tokenized_validated_prefix.split()[-1]
                filtered_idx2word = dict(
                    (self.word2index_y[candidate_word], candidate_word)
                    for candidate_word in self.word2index_y
                    if candidate_word[:len(last_user_word)] == last_user_word)

                if filtered_idx2word != dict():
                    del fixed_words_user[last_user_word_pos]
                    if last_user_word_pos in list(unk_words_dict.keys()):
                        del unk_words_dict[last_user_word_pos]
            else:
                filtered_idx2word = dict()
            constrain_search_end_time = time.time()
            logger.log(
                2, 'constrain_search_end_time time: %.6f' %
                (constrain_search_end_time - constrain_search_start_time))

        sample_beam_search_start_time = time.time()
        trans_indices, costs, alphas = \
            self.interactive_beam_searcher.sample_beam_search_interactive(src_seq,
                                                                          fixed_words=copy.copy(fixed_words_user),
                                                                          max_N=max_N,
                                                                          isles=isle_indices,
                                                                          valid_next_words=filtered_idx2word,
                                                                          idx2word=self.index2word_y)
        sample_beam_search_end_time = time.time()
        logger.log(
            2, 'sample_beam_search time: %.6f' %
            (sample_beam_search_end_time - sample_beam_search_start_time))

        if False and self.params_prediction['pos_unk']:  # note: branch deliberately disabled by the 'False and' guard
            alphas = [alphas]
            sources = [tokenized_input]
            heuristic = self.params_prediction['heuristic']
        else:
            alphas = None
            heuristic = None
            sources = None

        # 1.2 Decode hypothesis
        decoding_predictions_start_time = time.time()
        hypothesis = decode_predictions_beam_search([trans_indices],
                                                    self.index2word_y,
                                                    alphas=alphas,
                                                    x_text=sources,
                                                    heuristic=heuristic,
                                                    mapping=self.mapping,
                                                    pad_sequences=True,
                                                    verbose=0)[0]
        decoding_predictions_end_time = time.time()
        logger.log(
            2, 'decoding_predictions time: %.6f' %
            (decoding_predictions_end_time - decoding_predictions_start_time))

        # UNK words management
        unk_management_start_time = time.time()
        unk_indices = list(unk_words_dict)
        unk_words = list(unk_words_dict.values())
        if len(unk_indices) > 0:  # If we added some UNK word
            hypothesis = hypothesis.split()
            if len(hypothesis) < len(unk_indices):
                # The full hypothesis will be made up of UNK words:
                for i, index in enumerate(range(0, len(hypothesis))):
                    hypothesis[index] = unk_words[unk_indices[i]]
                for ii in range(i + 1, len(unk_words)):
                    hypothesis.append(unk_words[ii])
            else:  # We put each unknown word in the corresponding gap
                for i, index in enumerate(unk_indices):
                    if index < len(hypothesis):
                        hypothesis[index] = unk_words[i]
                    else:
                        hypothesis.append(unk_words[i])
            hypothesis = u' '.join(hypothesis)
        unk_management_end_time = time.time()
        logger.log(
            2, 'unk_management time: %.6f' %
            (unk_management_end_time - unk_management_start_time))

        hypothesis_detokenization_start_time = time.time()
        hypothesis = self.model_detokenize_f(hypothesis)
        hypothesis = self.general_detokenize_f(hypothesis, unescape=False)
        hypothesis_detokenization_end_time = time.time()
        logger.log(
            2, 'hypothesis_detokenization time: %.6f' %
            (hypothesis_detokenization_end_time -
             hypothesis_detokenization_start_time))
        generate_sample_end_time = time.time()
        logger.log(
            2, 'generate_sample time: %.6f' %
            (generate_sample_end_time - generate_sample_start_time))
        return hypothesis

    def learn_from_sample(self, source_sentence, target_sentence):
        """
        Incrementally adapt the model with the validated sample.
        :param source_sentence: Source sentence (x).
        :param target_sentence: Target sentence (y).
        :return:
        """
        # Tokenize input
        tokenized_input = self.general_tokenize_f(source_sentence,
                                                  escape=False)
        tokenized_input = self.model_tokenize_f(tokenized_input)
        src_seq = self.dataset.loadText(
            [tokenized_input],
            vocabularies=self.dataset.vocabulary[
                self.params['INPUTS_IDS_DATASET'][0]],
            max_len=self.params['MAX_INPUT_TEXT_LEN'],
            offset=0,
            fill=self.dataset.fill_text[self.params['INPUTS_IDS_DATASET'][0]],
            pad_on_batch=self.dataset.pad_on_batch[
                self.params['INPUTS_IDS_DATASET'][0]],
            words_so_far=False,
            loading_X=True)[0][0]
        # Tokenize output
        tokenized_reference = self.general_tokenize_f(target_sentence,
                                                      escape=False)
        tokenized_reference = self.model_tokenize_f(tokenized_reference)

        # Build inputs/outputs of the system
        state_below = self.dataset.loadText(
            [tokenized_reference],
            vocabularies=self.dataset.vocabulary[
                self.params['OUTPUTS_IDS_DATASET'][0]],
            max_len=self.params['MAX_OUTPUT_TEXT_LEN_TEST'],
            offset=1,
            fill=self.dataset.fill_text[self.params['INPUTS_IDS_DATASET'][-1]],
            pad_on_batch=self.dataset.pad_on_batch[
                self.params['INPUTS_IDS_DATASET'][-1]],
            words_so_far=False,
            loading_X=True)[0]

        # 4.1.3 Ground truth sample -> Interactively translated sentence
        # TODO: Load dense-text if necessary
        trg_seq = self.dataset.loadTextOneHot(
            [tokenized_reference],
            vocabularies=self.dataset.vocabulary[
                self.params['OUTPUTS_IDS_DATASET'][0]],
            vocabulary_len=self.dataset.vocabulary_len[
                self.params['OUTPUTS_IDS_DATASET'][0]],
            max_len=self.params['MAX_OUTPUT_TEXT_LEN_TEST'],
            offset=0,
            fill=self.dataset.fill_text[self.params['OUTPUTS_IDS_DATASET'][0]],
            pad_on_batch=self.dataset.pad_on_batch[
                self.params['OUTPUTS_IDS_DATASET'][0]],
            words_so_far=False,
            sample_weights=self.params['SAMPLE_WEIGHTS'],
            loading_X=False)
        # 4.2 Train online!
        if self.online_trainer is not None:
            self.online_trainer.train_online(
                [np.asarray([src_seq]), state_below],
                trg_seq,
                trg_words=[target_sentence])
        else:
            logger.warning('Online learning is disabled.')
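
Taken together, `generate_sample` and `learn_from_sample` support a prefix-based interactive-adaptive loop. A hypothetical driver follows (`sampler`, `source_sentences` and `reference_sentences` are assumed given; `ask_user_for_prefix` is not part of the original code and stands in for whatever UI collects the validated prefix):

# Hedged sketch of an interactive post-editing session with online adaptation.
for src, ref in zip(source_sentences, reference_sentences):
    hyp = sampler.generate_sample(src)
    while hyp != ref:
        prefix = ask_user_for_prefix(hyp, ref)  # longest prefix the user validates
        hyp = sampler.generate_sample(src, validated_prefix=prefix)
    # Once accepted, incrementally adapt the model on the validated pair.
    sampler.learn_from_sample(src, hyp)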
Example #4
class NMTSampler:
    def __init__(self, models, dataset, params, params_prediction, params_training, model_tokenize_f, model_detokenize_f, general_tokenize_f,
                 general_detokenize_f, mapping=None, word2index_x=None, word2index_y=None, index2word_y=None,
                 excluded_words=None, unk_id=1, eos_symbol='/', online=False, verbose=0):
        self.models = models
        self.dataset = dataset
        self.params = params
        self.params_prediction = params_prediction
        self.params_training = params_training
        self.model_tokenize_f = model_tokenize_f
        self.model_detokenize_f = model_detokenize_f
        self.general_tokenize_f = general_tokenize_f
        self.general_detokenize_f = general_detokenize_f
        self.mapping = mapping
        self.excluded_words = excluded_words
        self.verbose = verbose
        self.eos_symbol = eos_symbol
        self.word2index_x = word2index_x if word2index_x is not None else \
            dataset.vocabulary[params_prediction['INPUTS_IDS_DATASET'][0]]['words2idx']
        self.index2word_y = index2word_y if index2word_y is not None else \
            dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['idx2words']
        self.word2index_y = word2index_y if word2index_y is not None else \
            dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['words2idx']
        self.unk_id = unk_id
        self.interactive_beam_searcher = InteractiveBeamSearchSampler(self.models,
                                                                      self.dataset,
                                                                      self.params_prediction,
                                                                      excluded_words=self.excluded_words,
                                                                      verbose=self.verbose)

        # Compile Theano sampling function by generating a fake sample.
        # TODO: Find a better way of doing this
        logger.info('Compiling sampler...')
        self.generate_sample('i')
        logger.info('Done.')

        self.online = online
        if self.online:
            self.online_trainer = OnlineTrainer(self.models, self.dataset, None,  # Sampler
                                                None,  # Params prediction
                                                params_training,
                                                verbose=self.verbose)
            for i, nmt_model in enumerate(self.models):
                logger.info('Compiling model %d...' % i)
                nmt_model.model._make_train_function()
            logger.info('Done.')

        else:
            self.online_trainer = None

    def generate_sample(self, source_sentence, validated_prefix=None, max_N=5, isle_indices=None,
                        filtered_idx2word=None, unk_indices=None, unk_words=None):
        print ("In params prediction beam_size: ", self.params_prediction['beam_size'])
        logger.log(2, 'Beam size: %d' % (self.params_prediction['beam_size']))
        generate_sample_start_time = time.time()
        if unk_indices is None:
            unk_indices = []
        if unk_words is None:
            unk_words = []

        tokenization_start_time = time.time()
        tokenized_input = self.general_tokenize_f(source_sentence, escape=False)
        tokenized_input = self.model_tokenize_f(tokenized_input)
        tokenization_end_time = time.time()
        logger.log(2, 'tokenization time: %.6f' % (tokenization_end_time - tokenization_start_time))
        parse_input_start_time = time.time()
        src_seq, src_words = parse_input(tokenized_input, self.dataset, self.word2index_x)
        parse_input_end_time = time.time()
        logger.log(2, 'parse_input time: %.6f' % (parse_input_end_time - parse_input_start_time))

        fixed_words_user = OrderedDict()
        unk_words_dict = OrderedDict()
        # If the user provided some feedback...
        if validated_prefix is not None:
            next_correction = validated_prefix[-1]
            if next_correction == self.eos_symbol:
                return validated_prefix[:-1].decode('utf-8')

            # 2.2.4 Tokenize the prefix properly (possibly applying BPE)
            #  TODO: Here we are tokenizing the target language with the source language tokenizer
            prefix_tokenization_start_time = time.time()
            tokenized_validated_prefix = self.general_tokenize_f(validated_prefix, escape=False)
            tokenized_validated_prefix = self.model_tokenize_f(tokenized_validated_prefix)
            prefix_tokenization_end_time = time.time()
            logger.log(2, 'prefix_tokenization time: %.6f' % (prefix_tokenization_end_time - prefix_tokenization_start_time))

            # 2.2.5 Validate words
            word_validation_start_time = time.time()
            for pos, word in enumerate(tokenized_validated_prefix.split()):
                fixed_words_user[pos] = self.word2index_y.get(word, self.unk_id)
                if self.word2index_y.get(word) is None:
                    unk_words_dict[pos] = word
            word_validation_end_time = time.time()
            logger.log(2, 'word_validation time: %.6f' % (word_validation_end_time - word_validation_start_time))

            # 2.2.6 Constrain search for the last word
            constrain_search_start_time = time.time()
            last_user_word_pos = list(fixed_words_user.keys())[-1]
            if next_correction != u' ':
                last_user_word = tokenized_validated_prefix.split()[-1]
                filtered_idx2word = dict((self.word2index_y[candidate_word], candidate_word)
                                         for candidate_word in self.word2index_y if candidate_word[:len(last_user_word)] == last_user_word)

                # if candidate_word.decode('utf-8')[:len(last_user_word)] == last_user_word)
                if filtered_idx2word != dict():
                    del fixed_words_user[last_user_word_pos]
                    if last_user_word_pos in list(unk_words_dict.keys()):
                        del unk_words_dict[last_user_word_pos]
            else:
                filtered_idx2word = dict()
            constrain_search_end_time = time.time()
            logger.log(2, 'constrain_search_end_time time: %.6f' % (constrain_search_end_time - constrain_search_start_time))

        sample_beam_search_start_time = time.time()
        trans_indices, costs, alphas = \
            self.interactive_beam_searcher.sample_beam_search_interactive(src_seq,
                                                                          fixed_words=copy.copy(fixed_words_user),
                                                                          max_N=max_N,
                                                                          isles=isle_indices,
                                                                          valid_next_words=filtered_idx2word,
                                                                          idx2word=self.index2word_y)
        sample_beam_search_end_time = time.time()
        logger.log(2, 'sample_beam_search time: %.6f' % (sample_beam_search_end_time - sample_beam_search_start_time))

        # # Substitute possible unknown words in isles
        # unk_in_isles = []
        # for isle_idx, isle_sequence, isle_words in unks_in_isles:
        #     if unk_id in isle_sequence:
        #         unk_in_isles.append((subfinder(isle_sequence, list(trans_indices)), isle_words))

        if False and self.params_prediction['pos_unk']:  # note: branch deliberately disabled by the 'False and' guard
            alphas = [alphas]
            sources = [tokenized_input]
            heuristic = self.params_prediction['heuristic']
        else:
            alphas = None
            heuristic = None
            sources = None

        # 1.2 Decode hypothesis
        decoding_predictions_start_time = time.time()
        hypothesis = decode_predictions_beam_search([trans_indices],
                                                    self.index2word_y,
                                                    alphas=alphas,
                                                    x_text=sources,
                                                    heuristic=heuristic,
                                                    mapping=self.mapping,
                                                    pad_sequences=True,
                                                    verbose=0)[0]
        decoding_predictions_end_time = time.time()
        logger.log(2, 'decoding_predictions time: %.6f' % (decoding_predictions_end_time - decoding_predictions_start_time))

        # for (words_idx, starting_pos), words in unk_in_isles:
        #     for pos_unk_word, pos_hypothesis in enumerate(range(starting_pos, starting_pos + len(words_idx))):
        #         hypothesis[pos_hypothesis] = words[pos_unk_word]

        # UNK words management
        unk_management_start_time = time.time()
        unk_indices = list(unk_words_dict)
        unk_words = list(unk_words_dict.values())
        if len(unk_indices) > 0:  # If we added some UNK word
            hypothesis = hypothesis.split()
            if len(hypothesis) < len(unk_indices):  # The full hypothesis will be made up of UNK words:
                for i, index in enumerate(range(0, len(hypothesis))):
                    hypothesis[index] = unk_words[unk_indices[i]]
                for ii in range(i + 1, len(unk_words)):
                    hypothesis.append(unk_words[ii])
            else:  # We put each unknown word in the corresponding gap
                for i, index in enumerate(unk_indices):
                    if index < len(hypothesis):
                        hypothesis[index] = unk_words[i]
                    else:
                        hypothesis.append(unk_words[i])
            hypothesis = u' '.join(hypothesis)
        unk_management_end_time = time.time()
        logger.log(2, 'unk_management time: %.6f' % (unk_management_end_time - unk_management_start_time))

        hypothesis_detokenization_start_time = time.time()
        hypothesis = self.model_detokenize_f(hypothesis)
        hypothesis = self.general_detokenize_f(hypothesis, unescape=False)
        hypothesis_detokenization_end_time = time.time()
        logger.log(2, 'hypothesis_detokenization time: %.6f' % (hypothesis_detokenization_end_time - hypothesis_detokenization_start_time))
        generate_sample_end_time = time.time()
        logger.log(2, 'generate_sample time: %.6f' % (generate_sample_end_time - generate_sample_start_time))
        return hypothesis

    def learn_from_sample(self, source_sentence, target_sentence):

        # Tokenize input
        tokenized_input = self.general_tokenize_f(source_sentence, escape=False)
        tokenized_input = self.model_tokenize_f(tokenized_input)
        src_seq, src_words = parse_input(tokenized_input, self.dataset, self.word2index_x)

        # Tokenize output
        tokenized_reference = self.general_tokenize_f(target_sentence, escape=False)
        tokenized_reference = self.model_tokenize_f(tokenized_reference)

        # Build inputs/outputs of the system
        state_below = self.dataset.loadText([tokenized_reference.encode('utf-8')],
                                            vocabularies=self.dataset.vocabulary[self.params['OUTPUTS_IDS_DATASET'][0]],
                                            max_len=self.params['MAX_OUTPUT_TEXT_LEN_TEST'],
                                            offset=1,
                                            fill=self.dataset.fill_text[self.params['INPUTS_IDS_DATASET'][-1]],
                                            pad_on_batch=self.dataset.pad_on_batch[self.params['INPUTS_IDS_DATASET'][-1]],
                                            words_so_far=False,
                                            loading_X=True)[0]

        # 4.1.3 Ground truth sample -> Interactively translated sentence
        # TODO: Load dense_text if necessary
        trg_seq = self.dataset.loadTextOneHot([tokenized_reference.encode('utf-8')],
                                              vocabularies=self.dataset.vocabulary[self.params['OUTPUTS_IDS_DATASET'][0]],
                                              vocabulary_len=self.dataset.vocabulary_len[self.params['OUTPUTS_IDS_DATASET'][0]],
                                              max_len=self.params['MAX_OUTPUT_TEXT_LEN_TEST'],
                                              offset=0,
                                              fill=self.dataset.fill_text[self.params['OUTPUTS_IDS_DATASET'][0]],
                                              pad_on_batch=self.dataset.pad_on_batch[self.params['OUTPUTS_IDS_DATASET'][0]],
                                              words_so_far=False,
                                              sample_weights=self.params['SAMPLE_WEIGHTS'],
                                              loading_X=False)
        # 4.2 Train online!
        if self.online_trainer is not None:
            self.online_trainer.train_online([np.asarray([src_seq]), state_below], trg_seq, trg_words=[target_sentence])
        else:
            logging.warning('Online learning is disabled.')
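
Unlike Example #3, this variant converts text to indices with a `parse_input` helper rather than `Dataset.loadText`. A rough, illustrative re-implementation of what such a helper does (an assumption for clarity, not the library's actual code):

def parse_input_sketch(tokenized_input, word2index_x, unk_id=1):
    # Map each source token to its vocabulary index, falling back to
    # the unknown-word id; also return the tokens themselves.
    words = tokenized_input.split()
    seq = [word2index_x.get(w, unk_id) for w in words]
    return seq, words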
Example #5
    model_instances = [
        TranslationModel(params,
                         model_type=params['MODEL_TYPE'],
                         verbose=params['VERBOSE'],
                         model_name=params['MODEL_NAME'] + '_' + str(i),
                         vocabularies=dataset.vocabulary,
                         store_path=params['STORE_PATH'],
                         set_optimizer=False) for i in range(len(args.models))
    ]
    models = [
        updateModel(model, path, -1, full_path=True)
        for (model, path) in zip(model_instances, args.models)
    ]

    # Set additional inputs to models if using a custom loss function
    params['USE_CUSTOM_LOSS'] = True if 'PAS' in params['OPTIMIZER'] else False
    if params.get('N_BEST_OPTIMIZER', False):
        logging.info('Using N-best optimizer')

    models = build_online_models(models, params)
    online_trainer = OnlineTrainer(models,
                                   dataset,
                                   None,
                                   None,
                                   params_training,
                                   verbose=args.verbose)
    # Load text files
    fsrc = codecs.open(args.source, 'r',
                       encoding='utf-8')  # File with source sentences.
    source_lines = fsrc.read().split('\n')
    if source_lines[-1] == u'':
        source_lines = source_lines[:-1]
    n_sentences = len(source_lines)
    ftrans = codecs.open(args.dest, 'w', encoding='utf-8')  # Destination file of the (post edited) translations.
    logger.info("<<< Storing corrected hypotheses into: %s >>>" %
                str(args.dest))
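
A hypothetical continuation of this fragment, translating each source line and storing the corrected hypothesis (`sampler` stands for an NMTSampler built as in the earlier examples; this loop is a sketch, not the original script):

# Sketch: translate every source sentence and write the hypotheses out.
for line in source_lines:
    hypothesis = sampler.generate_sample(line)
    ftrans.write(hypothesis + u'\n')
ftrans.close()
fsrc.close()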
Example #6
def main():
    args = parse_args()
    server_address = ('', args.port)
    httpd = BaseHTTPServer.HTTPServer(server_address, NMTHandler)

    if args.config is None:
        logging.info("Reading parameters from config.py")
        from config import load_parameters
        params = load_parameters()
    else:
        logging.info("Loading parameters from %s" % str(args.config))
        params = pkl2dict(args.config)
    try:
        for arg in args.changes:
            try:
                k, v = arg.split('=')
            except ValueError:
                print('Overwritten arguments must have the form key=Value. \n Currently are: %s' % str(args.changes))
                exit(1)
            try:
                params[k] = ast.literal_eval(v)
            except ValueError:
                params[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)
    dataset = loadDataset(args.dataset)

    # For converting predictions into sentences
    # Dataset backwards compatibility
    bpe_separator = dataset.BPE_separator \
        if hasattr(dataset, "BPE_separator") and dataset.BPE_separator is not None else '@@'
    # Build BPE tokenizer if necessary
    if 'bpe' in params['TOKENIZATION_METHOD'].lower():
        logger.info('Building BPE')
        if not dataset.BPE_built:
            dataset.build_bpe(
                params.get('BPE_CODES_PATH',
                           params['DATA_ROOT_PATH'] + '/training_codes.joint'),
                bpe_separator)
    # Build tokenization function
    tokenize_f = eval('dataset.' +
                      params.get('TOKENIZATION_METHOD', 'tokenize_none'))

    detokenize_function = eval(
        'dataset.' + params.get('DETOKENIZATION_METHOD', 'detokenize_none'))
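    # Note: the eval(...) calls above just resolve the (de)tokenizer methods by
    # name; an equivalent, safer form would be
    # getattr(dataset, params.get('TOKENIZATION_METHOD', 'tokenize_none')).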
    dataset.build_moses_tokenizer(language=params['SRC_LAN'])
    dataset.build_moses_detokenizer(language=params['TRG_LAN'])
    tokenize_general = dataset.tokenize_moses
    detokenize_general = dataset.detokenize_moses

    params_prediction = dict()
    params_prediction['max_batch_size'] = params.get('BATCH_SIZE', 20)
    params_prediction['n_parallel_loaders'] = params.get('PARALLEL_LOADERS', 1)
    params_prediction['beam_size'] = params.get('BEAM_SIZE', 6)
    params_prediction['maxlen'] = params.get('MAX_OUTPUT_TEXT_LEN_TEST', 100)
    params_prediction['optimized_search'] = params['OPTIMIZED_SEARCH']
    params_prediction['model_inputs'] = params['INPUTS_IDS_MODEL']
    params_prediction['model_outputs'] = params['OUTPUTS_IDS_MODEL']
    params_prediction['dataset_inputs'] = params['INPUTS_IDS_DATASET']
    params_prediction['dataset_outputs'] = params['OUTPUTS_IDS_DATASET']
    params_prediction['search_pruning'] = params.get('SEARCH_PRUNING', False)
    params_prediction['normalize_probs'] = params.get('NORMALIZE_SAMPLING',
                                                      False)
    params_prediction['alpha_factor'] = params.get('ALPHA_FACTOR', 1.0)
    params_prediction['coverage_penalty'] = params.get('COVERAGE_PENALTY',
                                                       False)
    params_prediction['length_penalty'] = params.get('LENGTH_PENALTY', False)
    params_prediction['length_norm_factor'] = params.get(
        'LENGTH_NORM_FACTOR', 0.0)
    params_prediction['coverage_norm_factor'] = params.get(
        'COVERAGE_NORM_FACTOR', 0.0)
    params_prediction['pos_unk'] = params.get('POS_UNK', False)
    params_prediction['heuristic'] = params.get('HEURISTIC', 0)

    params_prediction['state_below_maxlen'] = -1 if params.get('PAD_ON_BATCH', True) \
        else params.get('MAX_OUTPUT_TEXT_LEN', 50)
    params_prediction['output_max_length_depending_on_x'] = params.get(
        'MAXLEN_GIVEN_X', True)
    params_prediction['output_max_length_depending_on_x_factor'] = params.get(
        'MAXLEN_GIVEN_X_FACTOR', 3)
    params_prediction['output_min_length_depending_on_x'] = params.get(
        'MINLEN_GIVEN_X', True)
    params_prediction['output_min_length_depending_on_x_factor'] = params.get(
        'MINLEN_GIVEN_X_FACTOR', 2)
    # Manage pos_unk strategies
    if params['POS_UNK']:
        mapping = None if dataset.mapping == dict() else dataset.mapping
    else:
        mapping = None

    if args.online:
        logging.info('Loading models from %s' % str(args.models))

        model_instances = [
            TranslationModel(params,
                             model_type=params['MODEL_TYPE'],
                             verbose=params['VERBOSE'],
                             model_name=params['MODEL_NAME'] + '_' + str(i),
                             vocabularies=dataset.vocabulary,
                             store_path=params['STORE_PATH'],
                             set_optimizer=False)
            for i in range(len(args.models))
        ]
        models = [
            updateModel(model, path, -1, full_path=True)
            for (model, path) in zip(model_instances, args.models)
        ]

        # Set additional inputs to models if using a custom loss function
        params['USE_CUSTOM_LOSS'] = True if 'PAS' in params['OPTIMIZER'] else False
        if params['N_BEST_OPTIMIZER']:
            logging.info('Using N-best optimizer')

        models = build_online_models(models, params)
        online_trainer = OnlineTrainer(models,
                                       dataset,
                                       None,
                                       None,
                                       params_training,
                                       verbose=args.verbose)
    else:
        models = [loadModel(m, -1, full_path=True) for m in args.models]

    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['INPUTS_IDS_DATASET'][0]]
    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['OUTPUTS_IDS_DATASET'][0]]

    # Get word2index and index2word dictionaries
    index2word_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['words2idx']
    index2word_x = dataset.vocabulary[params['INPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_x = dataset.vocabulary[params['INPUTS_IDS_DATASET'][0]]['words2idx']

    excluded_words = None
    interactive_beam_searcher = NMTSampler(models,
                                           dataset,
                                           params_prediction,
                                           tokenize_f,
                                           detokenize_function,
                                           tokenize_general,
                                           detokenize_general,
                                           mapping=mapping,
                                           word2index_x=word2index_x,
                                           word2index_y=word2index_y,
                                           index2word_y=index2word_y,
                                           excluded_words=excluded_words,
                                           verbose=args.verbose)

    # Compile Theano sampling function by generating a fake sample.
    # TODO: Find a better way of doing this
    print "Compiling sampler..."
    interactive_beam_searcher.generate_sample('i')

    httpd.sampler = interactive_beam_searcher

    print('Server starting at localhost:' + str(args.port))
    httpd.serve_forever()

def interactive_simulation():

    args = parse_args()
    # Update parameters
    if args.config is not None:
        logger.info('Reading parameters from %s.' % args.config)
        params = update_parameters({}, pkl2dict(args.config))
    else:
        logger.info('Reading parameters from config.py.')
        params = load_parameters()

    if args.online:
        from config_online import load_parameters as load_parameters_online
        online_parameters = load_parameters_online(params)
        params = update_parameters(params, online_parameters)

    try:
        for arg in args.changes:
            try:
                k, v = arg.split('=')
            except ValueError:
                print(
                    'Overwritten arguments must have the form key=Value. \n Currently are: %s'
                    % str(args.changes))
                exit(1)
            try:
                params[k] = ast.literal_eval(v)
            except ValueError:
                params[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)

    check_params(params)
    if args.verbose:
        logging.info("params = " + str(params))
    dataset = loadDataset(args.dataset)
    # dataset = update_dataset_from_file(dataset, args.source, params, splits=args.splits, remove_outputs=True)
    # Dataset backwards compatibility
    bpe_separator = dataset.BPE_separator \
        if hasattr(dataset, "BPE_separator") and dataset.BPE_separator is not None else u'@@'
    # Set tokenization method
    params['TOKENIZATION_METHOD'] = 'tokenize_bpe' if args.tokenize_bpe \
        else params.get('TOKENIZATION_METHOD', 'tokenize_none')
    # Build BPE tokenizer if necessary
    if 'bpe' in params['TOKENIZATION_METHOD'].lower():
        logger.info('Building BPE')
        if not dataset.BPE_built:
            dataset.build_bpe(
                params.get('BPE_CODES_PATH', params['DATA_ROOT_PATH'] + '/training_codes.joint'),
                separator=bpe_separator)
    # Build tokenization function
    tokenize_f = eval('dataset.' +
                      params.get('TOKENIZATION_METHOD', 'tokenize_none'))

    if args.online:
        # Training params
        params_training = {
            'n_epochs': params['MAX_EPOCH'],
            'shuffle': False,
            'loss': params.get('LOSS', 'categorical_crossentropy'),
            'batch_size': params.get('BATCH_SIZE', 1),
            'homogeneous_batches': False,
            'optimizer': params.get('OPTIMIZER', 'SGD'),
            'lr': params.get('LR', 0.1),
            'lr_decay': params.get('LR_DECAY', None),
            'lr_gamma': params.get('LR_GAMMA', 1.),
            'epochs_for_save': -1,
            'verbose': args.verbose,
            'eval_on_sets': params['EVAL_ON_SETS_KERAS'],
            'n_parallel_loaders': params['PARALLEL_LOADERS'],
            'extra_callbacks': [],  # callbacks,
            'reload_epoch': 0,
            'epoch_offset': 0,
            'data_augmentation': params['DATA_AUGMENTATION'],
            'patience': params.get('PATIENCE', 0),
            'metric_check': params.get('STOP_METRIC', None),
            'eval_on_epochs': params.get('EVAL_EACH_EPOCHS', True),
            'each_n_epochs': params.get('EVAL_EACH', 1),
            'start_eval_on_epoch': params.get('START_EVAL_ON_EPOCH', 0),
            'additional_training_settings': {
                'k': params.get('K', 1),
                'tau': params.get('TAU', 1),
                'lambda': params.get('LAMBDA', 0.5),
                'c': params.get('C', 0.5),
                'd': params.get('D', 0.5)
            }
        }
    else:
        params_training = dict()

    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['OUTPUTS_IDS_DATASET'][0]]
    logger.info("<<< Using an ensemble of %d models >>>" % len(args.models))
    if args.online:
        # Load trainable model(s)
        logging.info('Loading models from %s' % str(args.models))
        model_instances = [
            Captioning_Model(params,
                             model_type=params['MODEL_TYPE'],
                             verbose=params['VERBOSE'],
                             model_name=params['MODEL_NAME'] + '_' + str(i),
                             vocabularies=dataset.vocabulary,
                             store_path=params['STORE_PATH'],
                             clear_dirs=False,
                             set_optimizer=False)
            for i in range(len(args.models))
        ]
        models = [
            updateModel(model, path, -1, full_path=True)
            for (model, path) in zip(model_instances, args.models)
        ]

        # Set additional inputs to models if using a custom loss function
        params['USE_CUSTOM_LOSS'] = True if 'PAS' in params['OPTIMIZER'] else False
        if params['N_BEST_OPTIMIZER']:
            logging.info('Using N-best optimizer')

        models = build_online_models(models, params)
        online_trainer = OnlineTrainer(models,
                                       dataset,
                                       None,
                                       None,
                                       params_training,
                                       verbose=args.verbose)
    else:
        # Otherwise, load regular model(s)
        models = [loadModel(m, -1, full_path=True) for m in args.models]

    # Load text files
    logger.info("<<< Storing corrected hypotheses into: %s >>>" %
                str(args.dest))
    ftrans = open(args.dest, 'w')
    ftrans.close()

    # Do we want to save the original sentences?
    if args.original_dest is not None:
        logger.info("<<< Storing original hypotheses into: %s >>>" %
                    str(args.original_dest))
        ftrans_ori = open(args.original_dest, 'w')
        ftrans_ori.close()

    if args.references is not None:
        ftrg = codecs.open(args.references, 'r', encoding='utf-8')  # File with post-edited (or reference) sentences.
        all_references = ftrg.read().split('\n')
        if all_references[-1] == u'':
            all_references = all_references[:-1]

    # Get word2index and index2word dictionaries
    index2word_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['words2idx']
    unk_id = dataset.extra_words['<unk>']

    # Initialize counters
    total_errors = 0
    total_words = 0
    total_chars = 0
    total_mouse_actions = 0
    try:
        for s in args.splits:
            # Apply model predictions
            params_prediction = {
                'max_batch_size': params['BATCH_SIZE'],
                'n_parallel_loaders': params['PARALLEL_LOADERS'],
                'predict_on_sets': [s],
                'beam_size': params['BEAM_SIZE'],
                'maxlen': params['MAX_OUTPUT_TEXT_LEN_TEST'],
                'optimized_search': params['OPTIMIZED_SEARCH'],
                'model_inputs': params['INPUTS_IDS_MODEL'],
                'model_outputs': params['OUTPUTS_IDS_MODEL'],
                'dataset_inputs': params['INPUTS_IDS_DATASET'],
                'dataset_outputs': params['OUTPUTS_IDS_DATASET'],
                'normalize_probs': params.get('NORMALIZE_SAMPLING', False),
                'alpha_factor': params.get('ALPHA_FACTOR', 1.0),
                'normalize': params.get('NORMALIZATION', False),
                'normalization_type': params.get('NORMALIZATION_TYPE', None),
                'data_augmentation': params.get('DATA_AUGMENTATION', False),
                'mean_substraction': params.get('MEAN_SUBTRACTION', False),
                'wo_da_patch_type': params.get('WO_DA_PATCH_TYPE', 'whole'),
                'da_patch_type': params.get('DA_PATCH_TYPE', 'resize_and_rndcrop'),
                'da_enhance_list': params.get('DA_ENHANCE_LIST', None),
                'heuristic': params.get('HEURISTIC', None),
                'search_pruning': params.get('SEARCH_PRUNING', False),
                'state_below_index': -1,
                'output_text_index': 0,
                'apply_tokenization': params.get('APPLY_TOKENIZATION', False),
                'tokenize_f': eval('dataset.' + params.get('TOKENIZATION_METHOD', 'tokenize_none')),
                'apply_detokenization': params.get('APPLY_DETOKENIZATION', True),
                'detokenize_f': eval('dataset.' + params.get('DETOKENIZATION_METHOD', 'detokenize_none')),
                'coverage_penalty': params.get('COVERAGE_PENALTY', False),
                'length_penalty': params.get('LENGTH_PENALTY', False),
                'length_norm_factor': params.get('LENGTH_NORM_FACTOR', 0.0),
                'coverage_norm_factor': params.get('COVERAGE_NORM_FACTOR', 0.0),
                'pos_unk': False,
                'state_below_maxlen': -1 if params.get('PAD_ON_BATCH', True) else params.get('MAX_OUTPUT_TEXT_LEN_TEST', 50),
                'output_max_length_depending_on_x': params.get('MAXLEN_GIVEN_X', False),
                'output_max_length_depending_on_x_factor': params.get('MAXLEN_GIVEN_X_FACTOR', 3),
                'output_min_length_depending_on_x': params.get('MINLEN_GIVEN_X', False),
                'output_min_length_depending_on_x_factor': params.get('MINLEN_GIVEN_X_FACTOR', 2),
                'attend_on_output': params.get('ATTEND_ON_OUTPUT', 'transformer' in params['MODEL_TYPE'].lower()),
                'n_best_optimizer': params.get('N_BEST_OPTIMIZER', False)
            }

            # Build interactive sampler
            interactive_beam_searcher = InteractiveBeamSearchSampler(
                models,
                dataset,
                params_prediction,
                excluded_words=None,
                verbose=args.verbose)
            start_time = time.time()

            if args.verbose:
                logging.info("Params prediction = " + str(params_prediction))
                if args.online:
                    logging.info("Params training = " + str(params_training))
            n_samples = getattr(dataset, 'len_' + s)
            if args.references is None:
                all_references = dataset.extra_variables[s][
                    params['OUTPUTS_IDS_DATASET'][0]]

            # Start to translate the source file interactively
            for n_sample in range(n_samples):
                errors_sentence = 0
                mouse_actions_sentence = 0
                hypothesis_number = 0
                # Load data from dataset
                current_input = dataset.getX_FromIndices(
                    s, [n_sample],
                    normalization_type=params_prediction.get(
                        'normalization_type'),
                    normalization=params_prediction.get('normalize', False),
                    dataAugmentation=params_prediction.get(
                        'data_augmentation', False),
                    wo_da_patch_type=params_prediction.get(
                        'wo_da_patch_type', 'whole'),
                    da_patch_type=params_prediction.get(
                        'da_patch_type', 'resize_and_rndcrop'),
                    da_enhance_list=params_prediction.get(
                        'da_enhance_list', None))[0][0]
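
                # getX_FromIndices returns the batch of model inputs; the trailing
                # [0][0] is assumed to unwrap the single sample from the
                # single-element batch of the first input.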

                # Load references
                references = all_references[n_sample]

                tokenized_references = list(map(tokenize_f, references)) \
                    if args.tokenize_references else references

                # Get reference as desired by the user, i.e. detokenized if necessary
                reference = list(map(params_prediction['detokenize_f'], tokenized_references)) if \
                    args.detokenize_bpe else tokenized_references

                # Detokenize line for nicer logging :)
                logger.debug(u'\n\nProcessing sample %d' % (n_sample + 1))
                logger.debug(u'Target: %s' % reference)

                # 1. Get a first hypothesis
                trans_indices, costs, alphas = interactive_beam_searcher.sample_beam_search_interactive(
                    current_input)

                # 1.2 Decode hypothesis
                hypothesis = decode_predictions_beam_search([trans_indices],
                                                            index2word_y,
                                                            pad_sequences=True,
                                                            verbose=0)[0]
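
                # decode_predictions_beam_search maps index sequences back to
                # text via index2word_y, e.g. (illustrative) [3, 41, 7] ->
                # u'the cat </s>'; the pad_sequences flag accounts for padded
                # index sequences.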
                # 1.3 Store result (optional)
                hypothesis = params_prediction['detokenize_f'](hypothesis) \
                    if params_prediction.get('apply_detokenization', False) else hypothesis
                if args.original_dest is not None:
                    if params['SAMPLING_SAVE_MODE'] == 'list':
                        list2file(args.original_dest, [hypothesis],
                                  permission='a')
                    else:
                        raise Exception(
                            'Only "list" is allowed in "SAMPLING_SAVE_MODE"')
                logger.debug(u'Hypo_%d: %s' % (hypothesis_number, hypothesis))

                # 2.0 Interactive translation
                if hypothesis in tokenized_references:
                    # 2.1 If the sentence is correct, we validate it as-is
                    pass
                else:
                    # 2.2 Wrong hypothesis -> Interactively translate the sentence
                    correct_hypothesis = False
                    last_correct_pos = 0
                    while not correct_hypothesis:
                        # 2.2.1 Empty data structures for the next sentence
                        fixed_words_user = OrderedDict()
                        unk_words_dict = OrderedDict()
                        isle_indices = []
                        unks_in_isles = []

                        if args.prefix:
                            # 2.2.2 Compute longest common character prefix (LCCP)
                            reference_idx, next_correction_pos, validated_prefix = common_prefixes(
                                hypothesis, tokenized_references)
                            reference = tokenized_references[reference_idx]
                        else:
                            # 2.2.2 Compute common character segments
                            # TODO: segment-based feedback is not implemented yet;
                            # here `reference` keeps its previously selected value.
                            next_correction_pos, validated_prefix, validated_segments = common_segments(
                                hypothesis, reference)
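
                        # Illustrative example of the assumed common_prefixes
                        # semantics: for hypothesis u'the hat sat' and reference
                        # u'the cat sat', the longest common character prefix is
                        # u'the ', so next_correction_pos == 4 and the user's
                        # next keystroke is reference[4] == u'c'.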
                        if next_correction_pos == len(reference):
                            correct_hypothesis = True
                            break
                        # 2.2.3 Get next correction by checking against the reference
                        next_correction = reference[next_correction_pos]

                        # 2.2.4 Tokenize the prefix properly (possibly applying BPE)
                        tokenized_validated_prefix = tokenize_f(
                            validated_prefix + next_correction)

                        # 2.2.5 Validate words
                        for pos, word in enumerate(
                                tokenized_validated_prefix.split()):
                            fixed_words_user[pos] = word2index_y.get(
                                word, unk_id)
                            if word2index_y.get(word) is None:
                                unk_words_dict[pos] = word
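
                        # E.g. (illustrative) a validated prefix u'the ca' may
                        # tokenize to [u'the', u'ca']: position 0 is fixed to
                        # word2index_y[u'the'], and position 1 falls back to
                        # unk_id since u'ca' is out of vocabulary.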

                        # 2.2.6 Constrain search for the last word
                        last_user_word_pos = list(fixed_words_user.keys())[-1]
                        if next_correction != u' ':
                            last_user_word = tokenized_validated_prefix.split()[-1]
                            # Candidate completions: every vocabulary word that
                            # starts with the partially-typed last word.
                            filtered_idx2word = {
                                word2index_y[candidate_word]: candidate_word
                                for candidate_word in word2index_y
                                if candidate_word.startswith(last_user_word)}
                            if filtered_idx2word:
                                del fixed_words_user[last_user_word_pos]
                                if last_user_word_pos in unk_words_dict:
                                    del unk_words_dict[last_user_word_pos]
                        else:
                            filtered_idx2word = dict()
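
                        # If the user is mid-word (the next keystroke is not a
                        # space), the last position is left unconstrained and the
                        # search is instead restricted to the completions of the
                        # partial word collected in filtered_idx2word.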

                        logger.debug(u'"%s" to character %d.' %
                                     (next_correction, next_correction_pos))

                        # 2.2.7 Generate a hypothesis compatible with the feedback provided by the user
                        hypothesis = generate_constrained_hypothesis(
                            interactive_beam_searcher, current_input,
                            fixed_words_user, params_prediction, args,
                            isle_indices, filtered_idx2word, index2word_y,
                            None, None, None, unk_words_dict.keys(),
                            unk_words_dict.values(), unks_in_isles)
                        hypothesis_number += 1
                        hypothesis = u' '.join(
                            hypothesis)  # Join the word list into a unicode string
                        hypothesis = params_prediction['detokenize_f'](hypothesis) \
                            if args.detokenize_bpe else hypothesis
                        logger.debug(u'Target: %s' % reference)
                        logger.debug(u"Hypo_%d: %s" %
                                     (hypothesis_number, hypothesis))
                        # 2.2.8 Add a keystroke
                        errors_sentence += 1
                        # 2.2.9 Add a mouse action if we moved the pointer
                        if next_correction_pos - last_correct_pos > 1:
                            mouse_actions_sentence += 1
                        last_correct_pos = next_correction_pos

                    # 2.3 Final check: the validated reference may be a proper prefix of the hypothesis; if so, cut the hypothesis
                    if len(reference) < len(hypothesis):
                        hypothesis = hypothesis[:len(reference)]
                        errors_sentence += 1
                        logger.debug(u"Cutting hypothesis")

                # 2.4 Security assertion
                assert hypothesis in references, "Error: The final hypothesis does not match the reference!\n" \
                                                 "\t Split: %s \n" \
                                                 "\t Sentence: %d \n" \
                                                 "\t Hypothesis: %s\n" \
                                                 "\t Reference: %s" % (s, n_sample + 1,
                                                                       hypothesis,
                                                                       reference)
                # 3. Update user effort counters
                mouse_actions_sentence += 1  # This +1 is the validation action
                chars_sentence = len(hypothesis)
                total_errors += errors_sentence
                total_words += len(hypothesis.split())
                total_chars += chars_sentence
                total_mouse_actions += mouse_actions_sentence
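
                # Effort metrics accumulated over the session (as computed below):
                #   WSR   = total_errors / total_words            (word stroke ratio)
                #   MAR   = total_mouse_actions / total_words     (mouse action ratio)
                #   MAR_c = total_mouse_actions / total_chars
                #   KSMR  = (total_errors + total_mouse_actions) / total_chars
                # Note that the per-sentence figures below are normalized by
                # character counts, while the accumulated WSR/MAR are per word.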

                # 3.1 Log some info
                logger.debug(u"Final hypotesis: %s" % hypothesis)
                logger.debug(
                    u"%d errors. "
                    u"Sentence WSR: %4f. "
                    u"Sentence mouse strokes: %d "
                    u"Sentence MAR: %4f. "
                    u"Sentence MAR_c: %4f. "
                    u"Sentence KSMR: %4f. "
                    u"Accumulated (should only be considered for debugging purposes!) "
                    u"WSR: %4f. "
                    u"MAR: %4f. "
                    u"MAR_c: %4f. "
                    u"KSMR: %4f.\n\n\n\n" %
                    (errors_sentence, float(errors_sentence) / len(hypothesis),
                     mouse_actions_sentence,
                     float(mouse_actions_sentence) / len(hypothesis),
                     float(mouse_actions_sentence) / chars_sentence,
                     float(errors_sentence + mouse_actions_sentence) /
                     chars_sentence, float(total_errors) / total_words,
                     float(total_mouse_actions) / total_words,
                     float(total_mouse_actions) / total_chars,
                     float(total_errors + total_mouse_actions) / total_chars))
                # 4. If we are performing OL after each correct sample:
                if args.online:
                    # 4.1 Compute model inputs
                    # 4.1.1 Source text -> Already computed (used for the INMT process)
                    # 4.1.2 State below
                    state_below = dataset.loadText(
                        [reference],
                        vocabularies=dataset.vocabulary[
                            params['OUTPUTS_IDS_DATASET'][0]],
                        max_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                        offset=1,
                        fill=dataset.fill_text[params['INPUTS_IDS_DATASET']
                                               [-1]],
                        pad_on_batch=dataset.pad_on_batch[
                            params['INPUTS_IDS_DATASET'][-1]],
                        words_so_far=False,
                        loading_X=True)[0]
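
                    # offset=1 shifts the target one position to the right for
                    # teacher forcing, e.g. (illustrative) a target [y1, y2, y3]
                    # becomes state_below [<fill>, y1, y2].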

                    # 4.1.3 Ground truth sample -> Interactively translated sentence
                    trg_seq = dataset.loadTextOneHot(
                        [reference],
                        vocabularies=dataset.vocabulary[
                            params['OUTPUTS_IDS_DATASET'][0]],
                        vocabulary_len=dataset.vocabulary_len[
                            params['OUTPUTS_IDS_DATASET'][0]],
                        max_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                        offset=0,
                        fill=dataset.fill_text[params['OUTPUTS_IDS_DATASET']
                                               [0]],
                        pad_on_batch=dataset.pad_on_batch[
                            params['OUTPUTS_IDS_DATASET'][0]],
                        words_so_far=False,
                        sample_weights=params['SAMPLE_WEIGHTS'],
                        loading_X=False)
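                    # trg_seq is the corrected sentence, one-hot encoded over the
                    # output vocabulary (assumed shape: [1, max_len, vocab_size]),
                    # optionally paired with per-timestep sample weights.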
                    # 4.2 Train online!
                    online_trainer.train_online(
                        [np.asarray([current_input]), state_below],
                        trg_seq,
                        trg_words=[reference])
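
                    # One online update per validated sentence: the model takes a
                    # gradient step (or a few, depending on params_training) on
                    # the pair (source, corrected translation).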
                # 5 Write correct sentences into a file
                list2file(args.dest, [hypothesis], permission='a')

                if (n_sample + 1) % 50 == 0:
                    logger.info(u"%d sentences processed" % (n_sample + 1))
                    logger.info(u"Current speed is {} per sentence".format(
                        (time.time() - start_time) / (n_sample + 1)))
                    logger.info(u"Current WSR is: %f" %
                                (float(total_errors) / total_words))
                    logger.info(u"Current MAR is: %f" %
                                (float(total_mouse_actions) / total_words))
                    logger.info(u"Current MAR_c is: %f" %
                                (float(total_mouse_actions) / total_chars))
                    logger.info(u"Current KSMR is: %f" %
                                (float(total_errors + total_mouse_actions) /
                                 total_chars))
        # 6. Final!
        # 6.1 Log some information
        print(u"Total number of errors:", total_errors)
        print(u"Total number selections", total_mouse_actions)
        print(u"WSR: %f" % (float(total_errors) / total_words))
        print(u"MAR: %f" % (float(total_mouse_actions) / total_words))
        print(u"MAR_c: %f" % (float(total_mouse_actions) / total_chars))
        print(u"KSMR: %f" %
              (float(total_errors + total_mouse_actions) / total_chars))

    except KeyboardInterrupt:
        print(u'Interrupted!')
        print(u"Total number of corrections (up to now):", total_errors)
        print(u"WSR: %f" % (float(total_errors) / total_words))
        print(u"MAR: %f" % (float(total_mouse_actions) / total_words))
        print(u"MAR_c: %f" % (float(total_mouse_actions) / total_chars))
        print(u"KSMR: %f" %
              (float(total_errors + total_mouse_actions) / total_chars))