def translate(src_seq, tokenized_input, params_prediction, index2word_y):
    # 1. Get a first hypothesis
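    # Note: `interactive_beam_searcher` and `mapping` are assumed to be
    # module-level objects initialised elsewhere (an interactive beam-search
    # sampler and the source-target mapping loaded with the dataset).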
    trans_indices, costs, alphas = interactive_beam_searcher.sample_beam_search_interactive(
        src_seq)
    # 1.1 Set unk replacement strategy
    if params_prediction['pos_unk']:
        alphas = [alphas]
        sources = [tokenized_input]
        heuristic = params_prediction['heuristic']
    else:
        alphas = None
        heuristic = None
        sources = None

    # 1.2 Decode hypothesis
    hypothesis = decode_predictions_beam_search([trans_indices],
                                                index2word_y,
                                                alphas=alphas,
                                                x_text=sources,
                                                heuristic=heuristic,
                                                mapping=mapping,
                                                pad_sequences=True,
                                                verbose=0)[0]
    hypothesis = params_prediction['detokenize_f'](
        hypothesis) if params_prediction.get('apply_detokenization',
                                             False) else hypothesis
    return hypothesis, costs, alphas, trans_indices
    def check_grammar(self, input_sentence):

        with self.graph.as_default():
            with self.session.as_default():
                user_input = input_sentence
                with open('user_input.txt', 'w') as f:
                    f.write(user_input)
                self.dataset.setInput('user_input.txt',
                                      'test',
                                      type='text',
                                      id='source_text',
                                      pad_on_batch=True,
                                      tokenization='tokenize_basic',
                                      fill='end',
                                      max_text_len=30,
                                      min_occ=0,
                                      overwrite_split=True)

                self.dataset.setInput(None,
                                      'test',
                                      type='ghost',
                                      id='state_below',
                                      required=False,
                                      overwrite_split=True)

                self.dataset.setRawInput('user_input.txt',
                                         'test',
                                         type='file-name',
                                         id='raw_source_text',
                                         overwrite_split=True)

                vocab = self.dataset.vocabulary['target_text']['idx2words']

                predictions = self.nmt_model.predictBeamSearchNet(
                    self.dataset, self.params_prediction)['test']
                if self.params_prediction['pos_unk']:
                    samples = predictions['samples']
                    alphas = predictions['alphas']
                else:
                    samples = predictions
                    heuristic = None
                    sources = None
                predictions = decode_predictions_beam_search(
                    samples,  # The first element of predictions contains the word indices.
                    vocab,
                    verbose=self.params['VERBOSE'])

                #print("Correct Prediction: {}".format(predictions[0]))
                return predictions[0]
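
    # A minimal usage sketch (hypothetical wiring): the enclosing class is
    # assumed to hold the TF graph/session, the Dataset, the loaded NMT model
    # and the prediction parameters used above.
    #
    #   checker = GrammarChecker()  # hypothetical wrapper class
    #   print(checker.check_grammar('he go to school yesterday'))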
Example #3
    def predictResponse(self, context):
        with self.graph.as_default():
            with self.session.as_default():
                with open(os.path.join(MODEL_PATH, 'context.txt'), 'w') as f:
                    f.write(context)
                self.dataset.setInput(os.path.join(MODEL_PATH, 'context.txt'),
                                      'test',
                                      type='text',
                                      id='source_text',
                                      pad_on_batch=True,
                                      tokenization='tokenize_basic',
                                      fill='end',
                                      max_text_len=30,
                                      min_occ=0,
                                      overwrite_split=True)

                self.dataset.setInput(None,
                                      'test',
                                      type='ghost',
                                      id='state_below',
                                      required=False,
                                      overwrite_split=True)

                self.dataset.setRawInput(os.path.join(MODEL_PATH, 'context.txt'),
                                         'test',
                                         type='file-name',
                                         id='raw_source_text',
                                         overwrite_split=True)

                vocab = self.dataset.vocabulary['target_text']['idx2words']
                predictions = self.model.predictBeamSearchNet(self.dataset, self.params)['test']

                if self.params['pos_unk']:
                    samples = predictions['samples']
                    alphas = predictions['alphas']
                else:
                    samples = predictions
                    heuristic = None
                    sources = None

                predictions = decode_predictions_beam_search(samples, vocab)
                print('prediction: ' + predictions[0])
                return predictions[0]
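
    # A minimal usage sketch (hypothetical wiring): MODEL_PATH, the graph/session,
    # the Dataset and the loaded model are assumed to be prepared in the class
    # constructor.
    #
    #   bot = ResponseGenerator()  # hypothetical wrapper class
    #   print(bot.predictResponse('hello , how are you ?'))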
Example #4
                  id='raw_source_text',
                  overwrite_split=True)

    
    vocab = dataset.vocabulary['target_text']['idx2words']
    predictions = nmt_model.predictBeamSearchNet(dataset, params_prediction)['test']

    if params_prediction['pos_unk']:
        samples = predictions['samples'] # The first element of predictions contains the word indices.
        alphas = predictions['alphas']
    else:
        samples = predictions
        heuristic = None
        sources = None

    predictions = decode_predictions_beam_search(samples,  vocab, verbose=params['VERBOSE'])
    
    print(predictions[0])
    bot_responses.append(predictions[0])
    if len(context) > 2:
        context.pop(0)
    context.append(predictions[0])
    # text = open(os.path.join(DATA_PATH, 'train_y.txt')).read()
    # lines = text.split('\n')
    # for i, line in enumerate(lines):
    #     print('y_true: ' + line + '\t\ty_pred: ' + predictions[i])
    # filepath = '/content/drive/My Drive/test/user_input_preds.txt'
    # list2file(filepath, predictions)
    # with open(filepath, 'r') as f:
    #    pred = f.readline()
    # print(pred)
Example #5
def sample_ensemble(args, params):

    from data_engine.prepare_data import update_dataset_from_file
    from keras_wrapper.model_ensemble import BeamSearchEnsemble
    from keras_wrapper.cnn_model import loadModel
    from keras_wrapper.dataset import loadDataset
    from keras_wrapper.utils import decode_predictions_beam_search

    logging.info("Using an ensemble of %d models" % len(args.models))
    models = [loadModel(m, -1, full_path=True) for m in args.models]
    dataset = loadDataset(args.dataset)
    dataset = update_dataset_from_file(dataset,
                                       args.text,
                                       params,
                                       splits=args.splits,
                                       remove_outputs=True)

    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['INPUTS_IDS_DATASET'][0]]
    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['OUTPUTS_IDS_DATASET'][0]]
    # For converting predictions into sentences
    index2word_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET']
                                      [0]]['idx2words']

    if params.get('APPLY_DETOKENIZATION', False):
        detokenize_function = eval('dataset.' +
                                   params['DETOKENIZATION_METHOD'])

    params_prediction = dict()
    params_prediction['max_batch_size'] = params.get('BATCH_SIZE', 20)
    params_prediction['n_parallel_loaders'] = params.get('PARALLEL_LOADERS', 1)
    params_prediction['beam_size'] = params.get('BEAM_SIZE', 6)
    params_prediction['maxlen'] = params.get('MAX_OUTPUT_TEXT_LEN_TEST', 100)
    params_prediction['optimized_search'] = params['OPTIMIZED_SEARCH']
    params_prediction['model_inputs'] = params['INPUTS_IDS_MODEL']
    params_prediction['model_outputs'] = params['OUTPUTS_IDS_MODEL']
    params_prediction['dataset_inputs'] = params['INPUTS_IDS_DATASET']
    params_prediction['dataset_outputs'] = params['OUTPUTS_IDS_DATASET']
    params_prediction['search_pruning'] = params.get('SEARCH_PRUNING', False)
    params_prediction['normalize_probs'] = params.get('NORMALIZE_SAMPLING',
                                                      False)
    params_prediction['alpha_factor'] = params.get('ALPHA_FACTOR', 1.0)
    params_prediction['coverage_penalty'] = params.get('COVERAGE_PENALTY',
                                                       False)
    params_prediction['length_penalty'] = params.get('LENGTH_PENALTY', False)
    params_prediction['length_norm_factor'] = params.get(
        'LENGTH_NORM_FACTOR', 0.0)
    params_prediction['coverage_norm_factor'] = params.get(
        'COVERAGE_NORM_FACTOR', 0.0)
    params_prediction['pos_unk'] = params.get('POS_UNK', False)
    params_prediction['state_below_maxlen'] = -1 if params.get('PAD_ON_BATCH', True) \
        else params.get('MAX_OUTPUT_TEXT_LEN', 50)
    params_prediction['output_max_length_depending_on_x'] = params.get(
        'MAXLEN_GIVEN_X', True)
    params_prediction['output_max_length_depending_on_x_factor'] = params.get(
        'MAXLEN_GIVEN_X_FACTOR', 3)
    params_prediction['output_min_length_depending_on_x'] = params.get(
        'MINLEN_GIVEN_X', True)
    params_prediction['output_min_length_depending_on_x_factor'] = params.get(
        'MINLEN_GIVEN_X_FACTOR', 2)
    params_prediction['attend_on_output'] = params.get(
        'ATTEND_ON_OUTPUT', 'transformer' in params['MODEL_TYPE'].lower())

    heuristic = params.get('HEURISTIC', 0)
    mapping = None if dataset.mapping == dict() else dataset.mapping
    model_weights = args.weights

    if model_weights is not None and model_weights != []:
        assert len(model_weights) == len(
            models
        ), 'You should give a weight to each model. You gave %d models and %d weights.' % (
            len(models), len(model_weights))
        model_weights = [float(w) for w in model_weights]
        if len(model_weights) > 1:
            logging.info('Giving the following weights to each model: %s' %
                         str(model_weights))
    for s in args.splits:
        # Apply model predictions
        params_prediction['predict_on_sets'] = [s]
        beam_searcher = BeamSearchEnsemble(models,
                                           dataset,
                                           params_prediction,
                                           model_weights=model_weights,
                                           n_best=args.n_best,
                                           verbose=args.verbose)
        if args.n_best:
            predictions, n_best = beam_searcher.predictBeamSearchNet()[s]
        else:
            predictions = beam_searcher.predictBeamSearchNet()[s]
            n_best = None
        if params_prediction['pos_unk']:
            samples = predictions[0]
            alphas = predictions[1]
            sources = [
                x.strip() for x in open(args.text, 'r').read().split('\n')
            ]
            sources = sources[:-1] if len(sources[-1]) == 0 else sources
        else:
            samples = predictions
            alphas = None
            heuristic = None
            sources = None

        predictions = decode_predictions_beam_search(samples,
                                                     index2word_y,
                                                     alphas=alphas,
                                                     x_text=sources,
                                                     heuristic=heuristic,
                                                     mapping=mapping,
                                                     verbose=args.verbose)
        # Apply detokenization function if needed
        if params.get('APPLY_DETOKENIZATION', False):
            predictions = list(map(detokenize_function, predictions))

        if args.n_best:
            n_best_predictions = []
            for i, (n_best_preds, n_best_scores,
                    n_best_alphas) in enumerate(n_best):
                n_best_sample_score = []
                for n_best_pred, n_best_score, n_best_alpha in zip(
                        n_best_preds, n_best_scores, n_best_alphas):
                    pred = decode_predictions_beam_search(
                        [n_best_pred],
                        index2word_y,
                        alphas=[n_best_alpha]
                        if params_prediction['pos_unk'] else None,
                        x_text=[sources[i]]
                        if params_prediction['pos_unk'] else None,
                        heuristic=heuristic,
                        mapping=mapping,
                        verbose=args.verbose)
                    # Apply detokenization function if needed
                    if params.get('APPLY_DETOKENIZATION', False):
                        pred = list(map(detokenize_function, pred))

                    n_best_sample_score.append([i, pred, n_best_score])
                n_best_predictions.append(n_best_sample_score)
        # Store result
        if args.dest is not None:
            filepath = args.dest  # results file
            if params.get('SAMPLING_SAVE_MODE', 'list') == 'list':
                list2file(filepath, predictions)
                if args.n_best:
                    nbest2file(filepath + '.nbest', n_best_predictions)
            else:
                raise Exception(
                    'Only "list" is allowed in "SAMPLING_SAVE_MODE"')
        else:
            list2stdout(predictions)
            if args.n_best:
                logging.info('Storing n-best sentences in ./' + s + '.nbest')
                nbest2file('./' + s + '.nbest', n_best_predictions)
        logging.info('Sampling finished')
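
# A minimal invocation sketch (assumption: `load_parameters` is the project's
# config loader, as used by other examples on this page; the paths below are
# placeholders). Only the attributes actually read by sample_ensemble() are set.
def _sample_ensemble_demo():
    from argparse import Namespace
    from config import load_parameters  # assumed project config module

    args = Namespace(models=['trained_models/epoch_10'],  # placeholder model path
                     dataset='datasets/Dataset.pkl',      # placeholder dataset pickle
                     text='data/test.src',                # plain-text source file
                     splits=['val'],
                     weights=[],
                     n_best=False,
                     dest='hyps.val.txt',
                     verbose=1)
    sample_ensemble(args, load_parameters())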
Example #6
    def on_batch_end(self, n_update, logs=None):
        self.cum_update += 1
        if self.epoch_count + self.reload_epoch < self.start_sampling_on_epoch:
            return
        elif self.cum_update % self.each_n_updates != 0:
            return

        # Evaluate on each set separately
        for s in self.set_name:
            if self.beam_search:
                params_prediction = {'max_batch_size': self.batch_size,
                                     'n_parallel_loaders': self.extra_vars[
                                         'n_parallel_loaders'],
                                     'predict_on_sets': [s],
                                     'n_samples': self.n_samples,
                                     'pos_unk': False}
                params_prediction.update(checkDefaultParamsBeamSearch(self.extra_vars))
                predictions, truths, sources = self.model_to_eval.predictBeamSearchNet(self.ds, params_prediction)
            else:
                params_prediction = {'batch_size': self.batch_size,
                                     'n_parallel_loaders': self.extra_vars[
                                         'n_parallel_loaders'],
                                     'predict_on_sets': [s],
                                     'n_samples': self.n_samples,
                                     }
                # Convert predictions
                postprocess_fun = None
                if self.is_3DLabel:
                    postprocess_fun = [self.ds.convert_3DLabels_to_bboxes,
                                       self.extra_vars[s]['references_orig_sizes']]
                predictions = self.model_to_eval.predictNet(self.ds,
                                                            params_prediction,
                                                            postprocess_fun=postprocess_fun)

            if self.print_sources:
                if self.in_pred_idx is not None:
                    sources = [srcs[self.in_pred_idx][0] for srcs in sources]

                sources = decode_predictions_beam_search(sources,
                                                         self.index2word_x,
                                                         pad_sequences=True,
                                                         verbose=self.verbose)
            if s in predictions:
                if params_prediction.get('pos_unk', False):
                    samples = predictions[s][0]
                    alphas = predictions[s][1]
                    heuristic = self.extra_vars['heuristic']
                else:
                    samples = predictions[s]
                    alphas = None
                    heuristic = None

                predictions = predictions[s]
                if self.is_text:
                    if self.out_pred_idx is not None:
                        samples = samples[self.out_pred_idx]
                    # Convert predictions into sentences
                    if self.beam_search:
                        predictions = decode_predictions_beam_search(samples,
                                                                     self.index2word_y,
                                                                     alphas=alphas,
                                                                     x_text=sources,
                                                                     heuristic=heuristic,
                                                                     mapping=self.extra_vars.get('mapping', None),
                                                                     verbose=self.verbose)
                    else:
                        predictions = decode_predictions(samples,
                                                         1,
                                                         self.index2word_y,
                                                         self.sampling_type,
                                                         verbose=self.verbose)
                    truths = decode_predictions_one_hot(truths, self.index2word_y,
                                                        verbose=self.verbose)

                    # Apply detokenization function if needed
                    if self.extra_vars.get('apply_detokenization', False):
                        if self.print_sources:
                            sources = list(map(self.extra_vars['detokenize_f'], sources))
                        predictions = list(map(self.extra_vars['detokenize_f'],
                                               predictions))
                        truths = list(map(self.extra_vars['detokenize_f'], truths))

                # Write samples
                if self.print_sources:
                    # Write samples
                    for i, (source, sample, truth) in list(enumerate(zip(sources, predictions, truths))):
                        if sys.version_info.major == 2:
                            source = str(source.encode('utf-8'))
                            sample = str(sample.encode('utf-8'))
                            truth = str(truth.encode('utf-8'))
                        print("Source     (%d): %s" % (i, source))
                        print("Hypothesis (%d): %s" % (i, sample))
                        print("Reference  (%d): %s" % (i, truth))
                        print("")
                else:
                    for i, (sample, truth) in list(enumerate(zip(predictions, truths))):
                        if sys.version_info.major == 2:
                            sample = str(sample.encode('utf-8'))
                            truth = str(truth.encode('utf-8'))
                        print("Hypothesis (%d): %s" % (i, sample))
                        print("Reference  (%d): %s" % (i, truth))
                        print("")
def generate_constrained_hypothesis(beam_searcher,
                                    src_seq,
                                    fixed_words_user,
                                    params,
                                    args,
                                    isle_indices,
                                    filtered_idx2word,
                                    index2word_y,
                                    sources,
                                    heuristic,
                                    mapping,
                                    unk_indices,
                                    unk_words,
                                    unks_in_isles,
                                    unk_id=1):
    """
    Generates and decodes a user-constrained hypothesis given a source sentence and the user feedback signals.
    :param src_seq: Sequence of indices of the source sentence to translate.
    :param fixed_words_user: Dict of word indices fixed by the user and its corresponding position: {pos: idx_word}
    :param args: Simulation options
    :param isle_indices: Isles fixed by the user. List of (isle_index, [words])
    :param filtered_idx2word: Dictionary of possible words according to the current word prefix.
    :param index2word_y: Indices to words mapping.
    :param sources: Source words (for unk replacement)
    :param heuristic: Unk replacement heuristic
    :param mapping: Source--Target dictionary for Unk replacement strategies 1 and 2
    :param unk_indices: Indices of the hypothesis that contain an unknown word (introduced by the user)
    :param unk_words: Corresponding word for unk_indices
    :return: Constrained hypothesis
    """
    # Generate a constrained hypothesis
    trans_indices, costs, alphas = beam_searcher.sample_beam_search_interactive(
        src_seq,
        fixed_words=copy.copy(fixed_words_user),
        max_N=args.max_n,
        isles=isle_indices,
        valid_next_words=filtered_idx2word,
        idx2word=index2word_y)

    # Substitute possible unknown words in isles
    unk_in_isles = []
    for isle_idx, isle_sequence, isle_words in unks_in_isles:
        if unk_id in isle_sequence:
            unk_in_isles.append((subfinder(isle_sequence,
                                           list(trans_indices)), isle_words))

    if params.get('pos_unk', False):
        alphas = [alphas]
    else:
        alphas = None

    # Decode predictions
    hypothesis = decode_predictions_beam_search([trans_indices],
                                                index2word_y,
                                                alphas=alphas,
                                                x_text=sources,
                                                heuristic=heuristic,
                                                mapping=mapping,
                                                pad_sequences=True,
                                                verbose=0)[0]
    hypothesis = hypothesis.split()
    for (words_idx, starting_pos), words in unk_in_isles:
        for pos_unk_word, pos_hypothesis in enumerate(
                range(starting_pos, starting_pos + len(words_idx))):
            hypothesis[pos_hypothesis] = words[pos_unk_word]

    # UNK words management
    if len(unk_indices) > 0:  # If we added some UNK word
        if len(hypothesis) < len(
                unk_indices):  # The full hypothesis will be made up of UNK words
            for i, index in enumerate(range(0, len(hypothesis))):
                hypothesis[index] = unk_words[unk_indices[i]]
            for ii in range(i + 1, len(unk_words)):
                hypothesis.append(unk_words[ii])
        else:  # We put each unknown word in the corresponding gap
            for i, index in list(enumerate(unk_indices)):
                if index < len(hypothesis):
                    hypothesis[index] = list(unk_words)[i]
                else:
                    hypothesis.append(unk_words[i])

    return hypothesis
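
# A minimal call sketch (hypothetical values; argument shapes follow the
# docstring above, and `beam_searcher`, `params_prediction`, `args`, `sources`,
# `heuristic`, `mapping` and `index2word_y` are assumed to be set up as in the
# interactive-sampling examples on this page):
#
#   hyp = generate_constrained_hypothesis(
#       beam_searcher, src_seq,
#       fixed_words_user={0: 23, 1: 7},      # {position: word index}
#       params=params_prediction, args=args,
#       isle_indices=[(0, [23, 7])],         # (isle_index, [words])
#       filtered_idx2word={}, index2word_y=index2word_y,
#       sources=sources, heuristic=heuristic, mapping=mapping,
#       unk_indices=[], unk_words=[], unks_in_isles=[])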
Example #8
    'beam_size': 12,
    'maxlen': 50,
    'model_inputs': ['source_text', 'state_below'],
    'model_outputs': ['target_text'],
    'dataset_inputs': ['source_text', 'state_below'],
    'dataset_outputs': ['target_text'],
    'normalize': True,
    'alpha_factor': 0.6
}

Control_predictions = Control_model.predictBeamSearchNet(
    dataset, params_prediction)['test']

vocab = dataset.vocabulary['target_text']['idx2words']
Control_predictions = decode_predictions_beam_search(Control_predictions,
                                                     vocab,
                                                     verbose=params['VERBOSE'])

## see how they compare to ground truth
#
from keras_wrapper.extra.read_write import list2file
from keras_wrapper.extra import evaluation

Control_path = 'Control_M7.pred'
list2file(Control_path, Control_predictions)

dataset.setOutput('data/Ross_test.reply',
                  'test',
                  type='text',
                  id='target_text',
                  pad_on_batch=True,
Example #9
    def evaluate(self, epoch, counter_name='epoch'):

        # Evaluate on each set separately
        for s in self.set_name:
            # Apply model predictions
            if self.beam_search:
                params_prediction = {
                    'batch_size': self.batch_size,
                    'n_parallel_loaders':
                    self.extra_vars['n_parallel_loaders'],
                    'predict_on_sets': [s],
                    'pos_unk': False,
                    'heuristic': 0,
                    'mapping': None
                }
                params_prediction.update(
                    checkDefaultParamsBeamSearch(self.extra_vars))
                predictions = self.model_to_eval.predictBeamSearchNet(
                    self.ds, params_prediction)[s]
            else:
                orig_size = self.extra_vars.get('eval_orig_size', False)
                params_prediction = {
                    'batch_size': self.batch_size,
                    'n_parallel_loaders':
                    self.extra_vars['n_parallel_loaders'],
                    'predict_on_sets': [s]
                }
                # Convert predictions
                postprocess_fun = None
                if self.is_3DLabel:
                    postprocess_fun = [
                        self.ds.convert_3DLabels_to_bboxes,
                        self.extra_vars[s]['references_orig_sizes']
                    ]
                elif orig_size:
                    postprocess_fun = [
                        self.ds.resize_semantic_output,
                        self.extra_vars[s]['eval_orig_size_id']
                    ]
                predictions = \
                    self.model_to_eval.predictNet(self.ds, params_prediction, postprocess_fun=postprocess_fun)[s]

            if self.is_text:
                if params_prediction.get('pos_unk', False):
                    samples = predictions[0]
                    alphas = predictions[1]

                    if eval('self.ds.loaded_raw_' + s + '[0]'):
                        sources = predictions[2]
                    else:
                        sources = []
                        for preds in predictions[2]:
                            for src in preds[self.input_text_id]:
                                sources.append(src)
                        sources = decode_predictions_beam_search(
                            sources,
                            self.index2word_x,
                            pad_sequences=True,
                            verbose=self.verbose)
                    heuristic = params_prediction['heuristic']
                else:
                    samples = predictions
                    alphas = None
                    heuristic = None
                    sources = None
                if self.out_pred_idx is not None:
                    samples = samples[self.out_pred_idx]
                # Convert predictions into sentences
                if self.beam_search:
                    predictions = decode_predictions_beam_search(
                        samples,
                        self.index2word_y,
                        alphas=alphas,
                        x_text=sources,
                        heuristic=heuristic,
                        mapping=params_prediction['mapping'],
                        verbose=self.verbose)
                else:
                    predictions = decode_predictions(
                        predictions,
                        1,  # always set temperature to 1
                        self.index2word_y,
                        self.sampling_type,
                        verbose=self.verbose)

            # Store predictions
            if self.write_samples:
                # Store result
                filepath = self.save_path + '/' + s + '_' + counter_name + '_' + str(
                    epoch) + '.pred'  # results file
                if self.write_type == 'list':
                    list2file(filepath, predictions)
                elif self.write_type == 'vqa':
                    list2vqa(filepath, predictions,
                             self.extra_vars[s]['question_ids'])
                elif self.write_type == 'listoflists':
                    listoflists2file(filepath, predictions)
                elif self.write_type == 'numpy':
                    numpy2file(filepath, predictions)
                elif self.write_type == '3DLabels':
                    # TODO:
                    print("WRITE SAMPLES FUNCTION NOT IMPLEMENTED")
                else:
                    raise NotImplementedError('The store type "' +
                                              self.write_type +
                                              '" is not implemented.')

            # Evaluate on each metric
            for metric in self.metric_name:
                if self.verbose > 0:
                    logging.info('Evaluating on metric ' + metric)
                filepath = self.save_path + '/' + s + '.' + metric  # results file

                # Evaluate on the chosen metric
                metrics = evaluation.select[metric](pred_list=predictions,
                                                    verbose=self.verbose,
                                                    extra_vars=self.extra_vars,
                                                    split=s)

                # Print results to file and store in model log
                with open(filepath, 'a') as f:
                    header = counter_name + ','
                    line = str(epoch) + ','
                    # Store in model log
                    self.model_to_eval.log(s, counter_name, epoch)
                    for metric_ in sorted(metrics):
                        value = metrics[metric_]
                        header += metric_ + ', '
                        line += str(value) + ', '
                        # Store in model log
                        self.model_to_eval.log(s, metric_, value)
                    if epoch == 1 or epoch == self.start_eval_on_epoch:
                        f.write(header + '\n')
                    f.write(line + '\n')
                if self.verbose > 0:
                    logging.info('Done evaluating on metric ' + metric)
        # Save the model
        if self.save_each_evaluation:
            from keras_wrapper.cnn_model import saveModel
            saveModel(self.model_to_eval,
                      epoch,
                      store_iter=not self.eval_on_epochs)
Example #10
def get_model_predictions(asts_path):
    print("os.getcwd()", os.getcwd())
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    print("cur_dir", cur_dir)

    # if not os.path.isdir(os.path.join(os.getcwd(), 'keras')):
    #     print(subprocess.run(f'git clone https://github.com/MarcBS/keras.git', shell=True,
    #                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True))

    # # nmt_keras_dir = os.path.join(os.getcwd, 'nmt-keras')
    # if not os.path.isdir(os.path.join(os.getcwd(), 'nmt-keras')):
    #     print(subprocess.run(f'git clone https://github.com/lvapeab/nmt-keras && cd "nmt-keras" && pipenv install -e .', shell=True,
    #                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True))
    #     # print(subprocess.run(f'cd {nmt_keras_dir} && pipenv install -e .', shell=True,
    #     #                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True))
    #     print("ran cmds!!!")

    # sys.path.insert(0, os.path.join(os.getcwd(), 'nmt-keras'))
    # print("sys path!!!", sys.path)

    dataset = loadDataset(
        f'{cur_dir}/assets/epoch_{MODEL_EPOCH}_model_wrapper.pkl')
    with open(f'{cur_dir}/assets/params.json', 'r') as params_file:
        params = json.load(params_file)

    dataset.setInput(asts_path,
                     'test',
                     type='text',
                     id='source_text',
                     pad_on_batch=True,
                     tokenization=params['tokenize_x'],
                     fill='end',
                     max_text_len=params['x_max_text_len'],
                     min_occ=0)

    dataset.setInput(None,
                     'test',
                     type='ghost',
                     id='state_below',
                     required=False)

    dataset.setRawInput(asts_path,
                        'test',
                        type='file-name',
                        id='raw_source_text',
                        overwrite_split=True)

    nmt_model = loadModel(f'{cur_dir}/assets', MODEL_EPOCH)

    prediction_params = get_prediction_params()

    predictions = nmt_model.predictBeamSearchNet(dataset,
                                                 prediction_params)['test']

    vocab = dataset.vocabulary['target_text']['idx2words']
    samples = predictions['samples']  # Get word indices from the samples.

    predictions = decode_predictions_beam_search(samples,
                                                 vocab,
                                                 verbose=params['VERBOSE'])

    return predictions
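
# A minimal usage sketch (hypothetical input path; MODEL_EPOCH and the asset
# files are assumed to exist alongside this module, as above):
#
#   predictions = get_model_predictions('assets/test_asts.txt')
#   print(predictions[0])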
Example #11
    def on_batch_end(self, n_update, logs=None):
        self.cum_update += 1
        if self.epoch_count + self.reload_epoch < self.start_sampling_on_epoch:
            return
        elif self.cum_update % self.each_n_updates != 0:
            return

        # Evaluate on each set separately
        for s in self.set_name:
            if self.beam_search:
                params_prediction = {'max_batch_size': self.batch_size,
                                     'n_parallel_loaders': self.extra_vars['n_parallel_loaders'],
                                     'predict_on_sets': [s],
                                     'n_samples': self.n_samples,
                                     'pos_unk': False}
                params_prediction.update(checkDefaultParamsBeamSearch(self.extra_vars))
                predictions, truths, sources = self.model_to_eval.predictBeamSearchNet(self.ds,
                                                                                       params_prediction)
            else:
                params_prediction = {'batch_size': self.batch_size,
                                     'n_parallel_loaders': self.extra_vars['n_parallel_loaders'],
                                     'predict_on_sets': [s],
                                     'n_samples': self.n_samples,
                                     'verbose': self.verbose,
                                     }
                # Convert predictions
                postprocess_fun = None
                if self.is_3DLabel:
                    postprocess_fun = [self.ds.convert_3DLabels_to_bboxes,
                                       self.extra_vars[s]['references_orig_sizes']]
                predictions = self.model_to_eval.predictNet(self.ds,
                                                            params_prediction,
                                                            postprocess_fun=postprocess_fun)

            if self.print_sources:
                if self.in_pred_idx is not None:
                    sources = [srcs[self.in_pred_idx][0] for srcs in sources]

                sources = decode_predictions_beam_search(sources,
                                                         self.index2word_x,
                                                         pad_sequences=True,
                                                         verbose=self.verbose)
            if s in predictions:
                if params_prediction.get('pos_unk', False):
                    samples = predictions[s][0]
                    alphas = predictions[s][1]
                    heuristic = self.extra_vars['heuristic']
                else:
                    samples = predictions[s]
                    alphas = None
                    heuristic = None

                predictions = predictions[s]
                if self.is_text:
                    if self.out_pred_idx is not None:
                        samples = samples[self.out_pred_idx]
                    # Convert predictions into sentences
                    if self.beam_search:
                        predictions = decode_predictions_beam_search(samples,
                                                                     self.index2word_y,
                                                                     glossary=self.extra_vars.get('glossary', None),
                                                                     alphas=alphas,
                                                                     x_text=sources,
                                                                     heuristic=heuristic,
                                                                     mapping=self.extra_vars.get('mapping', None),
                                                                     verbose=self.verbose)
                    else:
                        predictions = decode_predictions(samples,
                                                         1,
                                                         self.index2word_y,
                                                         self.sampling_type,
                                                         verbose=self.verbose)
                    truths = decode_predictions_one_hot(truths, self.index2word_y,
                                                        verbose=self.verbose)

                    # Apply detokenization function if needed
                    if self.extra_vars.get('apply_detokenization', False):
                        if self.print_sources:
                            sources = list(map(self.extra_vars['detokenize_f'], sources))
                        predictions = list(map(self.extra_vars['detokenize_f'], predictions))
                        truths = list(map(self.extra_vars['detokenize_f'], truths))

                # Write samples
                if self.print_sources:
                    # Write samples
                    for i, (source, sample, truth) in list(enumerate(zip(sources, predictions, truths))):
                        if sys.version_info.major == 2:
                            source = str(source.encode('utf-8'))
                            sample = str(sample.encode('utf-8'))
                            truth = str(truth.encode('utf-8'))
                        print("Source     (%d): %s" % (i, source))
                        print("Hypothesis (%d): %s" % (i, sample))
                        print("Reference  (%d): %s" % (i, truth))
                        print("")
                else:
                    for i, (sample, truth) in list(enumerate(zip(predictions, truths))):
                        if sys.version_info.major == 2:
                            sample = str(sample.encode('utf-8'))
                            truth = str(truth.encode('utf-8'))
                        print("Hypothesis (%d): %s" % (i, sample))
                        print("Reference  (%d): %s" % (i, truth))
                        print("")
Example #12
    def evaluate(self, epoch, counter_name='epoch', logs=None):
        if logs is None:
            logs = {}
        # Change inputs and outputs mappings for evaluation
        self.changeInOutMappings()

        # Evaluate on each set separately
        all_metrics = []

        for s in self.set_name:
            # Apply model predictions
            if self.beam_search:
                params_prediction = {'max_batch_size': self.batch_size,
                                     'n_parallel_loaders': self.extra_vars.get('n_parallel_loaders', 1),
                                     'predict_on_sets': [s],
                                     'beam_batch_size': self.beam_batch_size if
                                     self.beam_batch_size is not None else self.batch_size,
                                     'pos_unk': False,
                                     'normalize': self.normalize,
                                     'normalization_type': self.normalization_type,
                                     'max_eval_samples': self.max_eval_samples
                                     }

                params_prediction.update(checkDefaultParamsBeamSearch(self.extra_vars))
                predictions_all = self.model_to_eval.predictBeamSearchNet(self.ds, params_prediction)[s]
            else:
                orig_size = self.extra_vars.get('eval_orig_size', False)
                params_prediction = {'batch_size': self.batch_size,
                                     'n_parallel_loaders': self.extra_vars.get('n_parallel_loaders', 1),
                                     'predict_on_sets': [s],
                                     'normalize': self.normalize,
                                     'normalization_type': self.normalization_type,
                                     'max_eval_samples': self.max_eval_samples,
                                     'model_name': self.model_name,
                                     }
                # Convert predictions
                postprocess_fun = None
                if self.is_3DLabel:
                    postprocess_fun = [self.ds.convert_3DLabels_to_bboxes,
                                       self.extra_vars[s]['references_orig_sizes']]
                elif orig_size:
                    postprocess_fun = [self.ds.resize_semantic_output,
                                       self.extra_vars[s]['eval_orig_size_id']]
                predictions_all = \
                    self.model_to_eval.predictNet(self.ds, params_prediction,
                                                  postprocess_fun=postprocess_fun)[s]

            # Single-output model
            if not self.gt_pos or self.gt_pos == 0 or len(self.gt_pos) == 1:
                if len(predictions_all) != 2:
                    predictions_all = [predictions_all]
                gt_positions = [0]

            # Multi-output model
            else:
                gt_positions = self.gt_pos

            # Select each output to evaluate separately
            for gt_pos, type_out, these_metrics, gt_id, write_type, index2word_y, index2word_x in zip(
                    gt_positions,
                    self.output_types,
                    self.metric_name,
                    self.gt_id,
                    self.write_type,
                    self.index2word_y,
                    self.index2word_x):

                predictions = predictions_all[gt_pos]

                if self.verbose > 0:
                    print('')
                    logging.info('Prediction output ' + str(gt_pos) + ': ' + str(
                        gt_id) + ' (' + str(type_out) + ')')
                # Postprocess outputs of type text
                if type_out == 'text':
                    if params_prediction.get('pos_unk', False):
                        samples = predictions[0]
                        alphas = predictions[1]

                        if eval('self.ds.loaded_raw_' + s + '[0]'):
                            sources = predictions[2]
                        else:
                            sources = []
                            for preds in predictions[2]:
                                for src in preds[self.input_text_id]:
                                    sources.append(src)
                            sources = decode_predictions_beam_search(sources,
                                                                     index2word_x,
                                                                     pad_sequences=True,
                                                                     verbose=self.verbose)
                        heuristic = self.extra_vars['heuristic']
                    else:
                        samples = predictions
                        alphas = None
                        heuristic = None
                        sources = None
                    if self.out_pred_idx is not None:
                        samples = samples[self.out_pred_idx]

                    # Convert predictions into sentences
                    if self.beam_search:
                        predictions = decode_predictions_beam_search(samples,
                                                                     index2word_y,
                                                                     glossary=self.extra_vars.get('glossary', None),
                                                                     alphas=alphas,
                                                                     x_text=sources,
                                                                     heuristic=heuristic,
                                                                     mapping=self.extra_vars.get('mapping', None),
                                                                     verbose=self.verbose)
                    else:
                        probs = predictions
                        predictions = decode_predictions(predictions,
                                                         1,
                                                         # always set temperature to 1
                                                         index2word_y,
                                                         self.sampling_type,
                                                         verbose=self.verbose)
                    # Apply detokenization function if needed
                    if self.extra_vars.get('apply_detokenization', False):
                        predictions = list(map(self.extra_vars['detokenize_f'], predictions))

                # Postprocess outputs of type binary
                elif type_out == 'binary':
                    predictions = decode_multilabel(predictions,
                                                    index2word_y,
                                                    min_val=self.min_pred_multilabel[
                                                        gt_pos],
                                                    verbose=self.verbose)

                    # Prepare references
                    # exec ("y_raw = self.ds.Y_" + s + "[gt_id]")
                    y_split = getattr(self.ds, 'Y_' + s)
                    y_raw = y_split[gt_id]
                    self.extra_vars[gt_pos][s]['references'] = self.ds.loadBinary(y_raw, gt_id)

                # Postprocess outputs of type 3DLabel
                elif type_out == '3DLabel':
                    self.extra_vars[gt_pos][s] = dict()
                    # exec ('ref=self.ds.Y_' + s + '["' + gt_id + '"]')
                    y_split = getattr(self.ds, 'Y_' + s)
                    ref = y_split[gt_id]
                    [ref, original_sizes] = self.ds.convert_GT_3DLabels_to_bboxes(
                        ref)
                    self.extra_vars[gt_pos][s]['references'] = ref
                    self.extra_vars[gt_pos][s]['references_orig_sizes'] = original_sizes

                # Postprocess outputs of type 3DSemanticLabel
                elif type_out == '3DSemanticLabel':
                    self.extra_vars[gt_pos]['eval_orig_size'] = self.eval_orig_size
                    self.extra_vars[gt_pos][s] = dict()
                    # exec ('ref=self.ds.Y_' + s + '["' + gt_id + '"]')
                    y_split = getattr(self.ds, 'Y_' + s)
                    ref = y_split[gt_id]
                    if self.eval_orig_size:
                        old_crop = copy.deepcopy(self.ds.img_size_crop)
                        self.ds.img_size_crop = copy.deepcopy(self.ds.img_size)
                        self.extra_vars[gt_pos][s]['eval_orig_size_id'] = np.array([gt_id] * len(ref))
                    ref = self.ds.load_GT_3DSemanticLabels(ref, gt_id)
                    if self.eval_orig_size:
                        self.ds.img_size_crop = copy.deepcopy(old_crop)
                    self.extra_vars[gt_pos][s]['references'] = ref

                # Other output data types
                else:
                    # exec ("self.extra_vars[gt_pos][s]['references'] = self.ds.Y_" + s + "[gt_id]")
                    y_split = getattr(self.ds, 'Y_' + s)
                    self.extra_vars[gt_pos][s]['references'] = y_split[gt_id]
                # Store predictions
                if self.write_samples:
                    # Store result
                    filepath = self.save_path + '/' + s + '_' + counter_name + '_' + str(epoch) + '_output_' + str(gt_pos) + '.pred'  # results file
                    if write_type == 'list':
                        list2file(filepath, predictions)
                    elif write_type == 'vqa':
                        try:
                            # exec ('refs = self.ds.Y_' + s + '[gt_id]')
                            y_split = getattr(self.ds, 'Y_' + s)
                            refs = y_split[gt_id]
                        except Exception:
                            refs = ['N/A' for _ in range(probs.shape[0])]
                        extra_data_plot = {'reference': refs,
                                           'probs': probs,
                                           'vocab': index2word_y}
                        list2vqa(filepath, predictions,
                                 self.extra_vars[gt_pos][s]['question_ids'],
                                 extra=extra_data_plot)
                    elif write_type == 'listoflists':
                        listoflists2file(filepath, predictions)
                    elif write_type == 'numpy':
                        numpy2file(filepath, predictions)
                    elif write_type == '3DLabels':
                        raise NotImplementedError(
                            'Write 3DLabels function is not implemented')
                    elif write_type == '3DSemanticLabel':
                        folder_path = self.save_path + '/' + s + '_' + counter_name + '_' + str(
                            epoch)  # results folder
                        numpy2imgs(folder_path,
                                   predictions,
                                   eval('self.ds.X_' + s + '["' + self.input_id + '"]'),
                                   self.ds)
                    else:
                        raise NotImplementedError('The store type "' + self.write_type + '" is not implemented.')

                # Evaluate on each metric
                for metric in these_metrics:
                    if self.verbose > 0:
                        logging.info('Evaluating on metric ' + metric)
                    filepath = self.save_path + '/' + s + '.' + metric  # results file

                    if s == 'train':
                        logging.info(
                            "WARNING: evaluation results on 'train' split might be incorrect when "
                            "applying random image shuffling.")

                    # Evaluate on the chosen metric
                    metrics = evaluation.select[metric](
                        pred_list=predictions,
                        verbose=self.verbose,
                        extra_vars=self.extra_vars[gt_pos],
                        split=s)

                    # Print results to file and store in model log
                    with open(filepath, 'a') as f:
                        header = counter_name + ','
                        line = str(epoch) + ','
                        # Store in model log
                        self.model_to_eval.log(s, counter_name, epoch)
                        for metric_ in sorted(metrics):
                            value = metrics[metric_]
                            # Multiple-output model
                            if self.gt_pos and self.gt_pos != 0:
                                metric_ += '_output_' + str(gt_pos)
                            all_metrics.append(metric_)
                            header += metric_ + ','
                            line += str(value) + ','
                            # Store in model log
                            self.model_to_eval.log(s, metric_, value)
                        if not self.written_header:
                            f.write(header + '\n')
                            self.written_header = True
                        f.write(line + '\n')

                    if self.verbose > 0:
                        logging.info('Done evaluating on metric ' + metric)

        # Store losses
        if logs.get('loss') is not None:
            self.model_to_eval.log('train', 'train_loss', logs['loss'])
        if logs.get('valid_loss') is not None:
            self.model_to_eval.log('val', 'val_loss', logs['valid_loss'])

        # Plot results so far
        if self.do_plot:
            if self.metric_name:
                self.model_to_eval.plot(counter_name, set(all_metrics),
                                        self.set_name, upperbound=self.max_plot)

        # Save the model
        if self.save_each_evaluation:
            from keras_wrapper.cnn_model import saveModel
            saveModel(self.model_to_eval, epoch, store_iter=not self.eval_on_epochs)

        # Recover inputs and outputs mappings for resume training
        self.recoverInOutMappings()
Example #13
def user_input_prediction(best_epoch):

    params = load_parameters()
    dataset = loadDataset(PATH + "dataset/Dataset_tutorial_dataset.pkl")
    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['INPUTS_IDS_DATASET'][0]]
    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['OUTPUTS_IDS_DATASET'][0]]

    # Load model
    nmt_model = loadModel(PATH + 'model/', best_epoch)

    params_prediction = {
        'language': 'en',
        'tokenize_f': eval('dataset.' + 'tokenize_basic'),
        'beam_size': 12,
        'length_penalty': True,
        'length_norm_factor': 1.0,
        'optimized_search': True,
        'model_inputs': ['source_text', 'state_below'],
        'model_outputs': ['target_text'],
        'dataset_inputs': ['source_text', 'state_below'],
        'dataset_outputs': ['target_text'],
        'n_parallel_loaders': 1,
        'maxlen': 50,
        'normalize': True,
        'pos_unk': True,
        'heuristic': 0,
        'state_below_maxlen': 1,
        'predict_on_sets': ['test'],
        'verbose': 0,
    }
    result = pyfiglet.figlet_format("TESTING WITH USER INPUT", font="digital")
    print(result)
    while True:
        print("Input a sentence:")
        user_input = input()

        with open('user_input.txt', 'w') as f:
            f.write(user_input)
        dataset.setInput('user_input.txt',
                         'test',
                         type='text',
                         id='source_text',
                         pad_on_batch=True,
                         tokenization='tokenize_basic',
                         fill='end',
                         max_text_len=100,
                         min_occ=1,
                         overwrite_split=True)

        dataset.setInput(None,
                         'test',
                         type='ghost',
                         id='state_below',
                         required=False,
                         overwrite_split=True)

        dataset.setRawInput('user_input.txt',
                            'test',
                            type='file-name',
                            id='raw_source_text',
                            overwrite_split=True)

        vocab = dataset.vocabulary['target_text']['idx2words']
        predictions = nmt_model.predictBeamSearchNet(dataset,
                                                     params_prediction)['test']
        predictions = decode_predictions_beam_search(
            predictions[
                0],  # The first element of predictions contain the word indices.
            vocab,
            verbose=params['VERBOSE'])

        print(predictions[0])
Example No. 14
File: main.py Project: lvapeab/TMA
def apply_Video_model(params):
    """
        Function for using a previously trained model for sampling.
    """

    ########### Load data
    dataset = build_dataset(params)
    if '-vidtext-embed' not in params['DATASET_NAME']:
        params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
            params['OUTPUTS_IDS_DATASET'][0]]
    else:
        params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
            params['INPUTS_IDS_DATASET'][1]]
    ###########

    ########### Load model
    video_model = loadModel(params['STORE_PATH'],
                            params['SAMPLING_RELOAD_POINT'],
                            reload_epoch=params['SAMPLING_RELOAD_EPOCH'])
    video_model.setOptimizer()
    ###########

    ########### Apply sampling
    extra_vars = dict()
    extra_vars['tokenize_f'] = eval('dataset.' + params['TOKENIZATION_METHOD'])
    extra_vars['language'] = params.get('TRG_LAN', 'en')

    for s in params["EVAL_ON_SETS"]:

        # Apply model predictions
        params_prediction = {
            'max_batch_size': params['BATCH_SIZE'],
            'n_parallel_loaders': params['PARALLEL_LOADERS'],
            'predict_on_sets': [s]
        }

        # Convert predictions into sentences
        if '-vidtext-embed' not in params['DATASET_NAME']:
            vocab = dataset.vocabulary[params['OUTPUTS_IDS_DATASET']
                                       [0]]['idx2words']
        else:
            vocab = None

        if params['BEAM_SEARCH']:
            params_prediction['beam_size'] = params['BEAM_SIZE']
            params_prediction['maxlen'] = params['MAX_OUTPUT_TEXT_LEN_TEST']
            params_prediction['optimized_search'] = (
                params['OPTIMIZED_SEARCH']
                and '-upperbound' not in params['DATASET_NAME'])
            params_prediction['model_inputs'] = params['INPUTS_IDS_MODEL']
            params_prediction['model_outputs'] = params['OUTPUTS_IDS_MODEL']
            params_prediction['dataset_inputs'] = params['INPUTS_IDS_DATASET']
            params_prediction['dataset_outputs'] = params[
                'OUTPUTS_IDS_DATASET']
            params_prediction['normalize_probs'] = params['NORMALIZE_SAMPLING']
            params_prediction['alpha_factor'] = params['ALPHA_FACTOR']
            params_prediction['temporally_linked'] = (
                '-linked' in params['DATASET_NAME']
                and '-upperbound' not in params['DATASET_NAME']
                and '-video' not in params['DATASET_NAME'])
            predictions = video_model.predictBeamSearchNet(
                dataset, params_prediction)[s]
            predictions = decode_predictions_beam_search(
                predictions, vocab, verbose=params['VERBOSE'])
        else:
            predictions = video_model.predictNet(dataset, params_prediction)[s]
            predictions = decode_predictions(predictions,
                                             1,
                                             vocab,
                                             params['SAMPLING'],
                                             verbose=params['VERBOSE'])

        # Store result
        filepath = video_model.model_path + '/' + s + '_sampling.pred'  # results file
        if params['SAMPLING_SAVE_MODE'] == 'list':
            list2file(filepath, predictions)
        else:
            raise Exception('Only "list" is allowed in "SAMPLING_SAVE_MODE"')

        # Evaluate if any metric in params['METRICS']
        for metric in params['METRICS']:
            logging.info('Evaluating on metric ' + metric)
            filepath = video_model.model_path + '/' + s + '_sampling.' + metric  # results file

            # Evaluate on the chosen metric
            extra_vars[s] = dict()
            extra_vars[s]['references'] = dataset.extra_variables[s][
                params['OUTPUTS_IDS_DATASET'][0]]
            metrics = selectMetric[metric](pred_list=predictions,
                                           verbose=1,
                                           extra_vars=extra_vars,
                                           split=s)

            # Print results to file
            with open(filepath, 'w') as f:
                header = ''
                line = ''
                for metric_ in sorted(metrics):
                    value = metrics[metric_]
                    header += metric_ + ','
                    line += str(value) + ','
                f.write(header + '\n')
                f.write(line + '\n')
            logging.info('Done evaluating on metric ' + metric)
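
# A minimal, illustrative sketch (not part of the original project) of how the metric
# files written above could be read back. Each '<split>_sampling.<metric>' file holds one
# comma-separated header row of metric names followed by one row of values, both with a
# trailing comma. The helper name and the float conversion are assumptions.
def read_metrics_file(filepath):
    with open(filepath, 'r') as f:
        names = f.readline().strip().rstrip(',').split(',')
        values = f.readline().strip().rstrip(',').split(',')
    return dict(zip(names, map(float, values)))
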
def interactive_simulation():

    args = parse_args()
    # Update parameters
    if args.config is not None:
        logger.info('Reading parameters from %s.' % args.config)
        params = update_parameters({}, pkl2dict(args.config))
    else:
        logger.info('Reading parameters from config.py.')
        params = load_parameters()

    if args.online:
        from config_online import load_parameters as load_parameters_online
        online_parameters = load_parameters_online(params)
        params = update_parameters(params, online_parameters)

    try:
        for arg in args.changes:
            try:
                k, v = arg.split('=')
            except ValueError:
                print(
                    'Overwritten arguments must have the form key=Value.\n'
                    'The current arguments are: %s' % str(args.changes))
                exit(1)
            try:
                params[k] = ast.literal_eval(v)
            except ValueError:
                params[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)

    check_params(params)
    if args.verbose:
        logging.info("params = " + str(params))
    dataset = loadDataset(args.dataset)
    # dataset = update_dataset_from_file(dataset, args.source, params, splits=args.splits, remove_outputs=True)
    # Dataset backwards compatibility
    bpe_separator = (dataset.BPE_separator
                     if hasattr(dataset, "BPE_separator") and dataset.BPE_separator is not None
                     else u'@@')
    # Set tokenization method
    params['TOKENIZATION_METHOD'] = ('tokenize_bpe' if args.tokenize_bpe
                                     else params.get('TOKENIZATION_METHOD', 'tokenize_none'))
    # Build BPE tokenizer if necessary
    if 'bpe' in params['TOKENIZATION_METHOD'].lower():
        logger.info('Building BPE')
        if not dataset.BPE_built:
            dataset.build_bpe(params.get(
                'BPE_CODES_PATH',
                params['DATA_ROOT_PATH'] + '/training_codes.joint'),
                              separator=bpe_separator)
    # Build tokenization function
    tokenize_f = eval('dataset.' +
                      params.get('TOKENIZATION_METHOD', 'tokenize_none'))

    if args.online:
        # Training params
        params_training = {
            'n_epochs': params['MAX_EPOCH'],
            'shuffle': False,
            'loss': params.get('LOSS', 'categorical_crossentropy'),
            'batch_size': params.get('BATCH_SIZE', 1),
            'homogeneous_batches': False,
            'optimizer': params.get('OPTIMIZER', 'SGD'),
            'lr': params.get('LR', 0.1),
            'lr_decay': params.get('LR_DECAY', None),
            'lr_gamma': params.get('LR_GAMMA', 1.),
            'epochs_for_save': -1,
            'verbose': args.verbose,
            'eval_on_sets': params['EVAL_ON_SETS_KERAS'],
            'n_parallel_loaders': params['PARALLEL_LOADERS'],
            'extra_callbacks': [],  # callbacks,
            'reload_epoch': 0,
            'epoch_offset': 0,
            'data_augmentation': params['DATA_AUGMENTATION'],
            'patience': params.get('PATIENCE', 0),
            'metric_check': params.get('STOP_METRIC', None),
            'eval_on_epochs': params.get('EVAL_EACH_EPOCHS', True),
            'each_n_epochs': params.get('EVAL_EACH', 1),
            'start_eval_on_epoch': params.get('START_EVAL_ON_EPOCH', 0),
            'additional_training_settings': {
                'k': params.get('K', 1),
                'tau': params.get('TAU', 1),
                'lambda': params.get('LAMBDA', 0.5),
                'c': params.get('C', 0.5),
                'd': params.get('D', 0.5)
            }
        }
    else:
        params_training = dict()

    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[
        params['OUTPUTS_IDS_DATASET'][0]]
    logger.info("<<< Using an ensemble of %d models >>>" % len(args.models))
    if args.online:
        # Load trainable model(s)
        logging.info('Loading models from %s' % str(args.models))
        model_instances = [
            Captioning_Model(params,
                             model_type=params['MODEL_TYPE'],
                             verbose=params['VERBOSE'],
                             model_name=params['MODEL_NAME'] + '_' + str(i),
                             vocabularies=dataset.vocabulary,
                             store_path=params['STORE_PATH'],
                             clear_dirs=False,
                             set_optimizer=False)
            for i in range(len(args.models))
        ]
        models = [
            updateModel(model, path, -1, full_path=True)
            for (model, path) in zip(model_instances, args.models)
        ]

        # Set additional inputs to models if using a custom loss function
        params['USE_CUSTOM_LOSS'] = 'PAS' in params['OPTIMIZER']
        if params['N_BEST_OPTIMIZER']:
            logging.info('Using N-best optimizer')

        models = build_online_models(models, params)
        online_trainer = OnlineTrainer(models,
                                       dataset,
                                       None,
                                       None,
                                       params_training,
                                       verbose=args.verbose)
    else:
        # Otherwise, load regular model(s)
        models = [loadModel(m, -1, full_path=True) for m in args.models]

    # Load text files
    logger.info("<<< Storing corrected hypotheses into: %s >>>" %
                str(args.dest))
    ftrans = open(args.dest, 'w')
    ftrans.close()

    # Do we want to save the original sentences?
    if args.original_dest is not None:
        logger.info("<<< Storing original hypotheses into: %s >>>" %
                    str(args.original_dest))
        ftrans_ori = open(args.original_dest, 'w')
        ftrans_ori.close()

    if args.references is not None:
        ftrg = codecs.open(args.references, 'r', encoding='utf-8'
                           )  # File with post-edited (or reference) sentences.
        all_references = ftrg.read().split('\n')
        if all_references[-1] == u'':
            all_references = all_references[:-1]

    # Get word2index and index2word dictionaries
    index2word_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET']
                                      [0]]['idx2words']
    word2index_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET']
                                      [0]]['words2idx']
    unk_id = dataset.extra_words['<unk>']

    # Initialize counters
    total_errors = 0
    total_words = 0
    total_chars = 0
    total_mouse_actions = 0
    try:
        for s in args.splits:
            # Apply model predictions
            params_prediction = {
                'max_batch_size': params['BATCH_SIZE'],
                'n_parallel_loaders': params['PARALLEL_LOADERS'],
                'predict_on_sets': [s],
                'beam_size': params['BEAM_SIZE'],
                'maxlen': params['MAX_OUTPUT_TEXT_LEN_TEST'],
                'optimized_search': params['OPTIMIZED_SEARCH'],
                'model_inputs': params['INPUTS_IDS_MODEL'],
                'model_outputs': params['OUTPUTS_IDS_MODEL'],
                'dataset_inputs': params['INPUTS_IDS_DATASET'],
                'dataset_outputs': params['OUTPUTS_IDS_DATASET'],
                'normalize_probs': params.get('NORMALIZE_SAMPLING', False),
                'alpha_factor': params.get('ALPHA_FACTOR', 1.0),
                'normalize': params.get('NORMALIZATION', False),
                'normalization_type': params.get('NORMALIZATION_TYPE', None),
                'data_augmentation': params.get('DATA_AUGMENTATION', False),
                'mean_substraction': params.get('MEAN_SUBTRACTION', False),
                'wo_da_patch_type': params.get('WO_DA_PATCH_TYPE', 'whole'),
                'da_patch_type': params.get('DA_PATCH_TYPE', 'resize_and_rndcrop'),
                'da_enhance_list': params.get('DA_ENHANCE_LIST', None),
                'heuristic': params.get('HEURISTIC', None),
                'search_pruning': params.get('SEARCH_PRUNING', False),
                'state_below_index': -1,
                'output_text_index': 0,
                'apply_tokenization': params.get('APPLY_TOKENIZATION', False),
                'tokenize_f': eval('dataset.' + params.get('TOKENIZATION_METHOD', 'tokenize_none')),
                'apply_detokenization': params.get('APPLY_DETOKENIZATION', True),
                'detokenize_f': eval('dataset.' + params.get('DETOKENIZATION_METHOD', 'detokenize_none')),
                'coverage_penalty': params.get('COVERAGE_PENALTY', False),
                'length_penalty': params.get('LENGTH_PENALTY', False),
                'length_norm_factor': params.get('LENGTH_NORM_FACTOR', 0.0),
                'coverage_norm_factor': params.get('COVERAGE_NORM_FACTOR', 0.0),
                'pos_unk': False,
                'state_below_maxlen': -1 if params.get('PAD_ON_BATCH', True) else params.get('MAX_OUTPUT_TEXT_LEN_TEST', 50),
                'output_max_length_depending_on_x': params.get('MAXLEN_GIVEN_X', False),
                'output_max_length_depending_on_x_factor': params.get('MAXLEN_GIVEN_X_FACTOR', 3),
                'output_min_length_depending_on_x': params.get('MINLEN_GIVEN_X', False),
                'output_min_length_depending_on_x_factor': params.get('MINLEN_GIVEN_X_FACTOR', 2),
                'attend_on_output': params.get('ATTEND_ON_OUTPUT', 'transformer' in params['MODEL_TYPE'].lower()),
                'n_best_optimizer': params.get('N_BEST_OPTIMIZER', False)
            }

            # Build interactive sampler
            interactive_beam_searcher = InteractiveBeamSearchSampler(
                models,
                dataset,
                params_prediction,
                excluded_words=None,
                verbose=args.verbose)
            start_time = time.time()

            if args.verbose:
                logging.info("Params prediction = " + str(params_prediction))
                if args.online:
                    logging.info("Params training = " + str(params_training))
            n_samples = getattr(dataset, 'len_' + s)
            if args.references is None:
                all_references = dataset.extra_variables[s][
                    params['OUTPUTS_IDS_DATASET'][0]]

            # Start to translate the source file interactively
            for n_sample in range(n_samples):
                errors_sentence = 0
                mouse_actions_sentence = 0
                hypothesis_number = 0
                # Load data from dataset
                current_input = dataset.getX_FromIndices(
                    s, [n_sample],
                    normalization_type=params_prediction.get(
                        'normalization_type'),
                    normalization=params_prediction.get('normalize', False),
                    dataAugmentation=params_prediction.get(
                        'data_augmentation', False),
                    wo_da_patch_type=params_prediction.get(
                        'wo_da_patch_type', 'whole'),
                    da_patch_type=params_prediction.get(
                        'da_patch_type', 'resize_and_rndcrop'),
                    da_enhance_list=params_prediction.get(
                        'da_enhance_list', None))[0][0]

                # Load references
                references = all_references[n_sample]

                tokenized_references = list(map(
                    tokenize_f,
                    references)) if args.tokenize_references else references

                # Get reference as desired by the user, i.e. detokenized if necessary
                reference = list(map(params_prediction['detokenize_f'], tokenized_references)) if \
                    args.detokenize_bpe else tokenized_references

                # Detokenize line for nicer logging :)
                logger.debug(u'\n\nProcessing sample %d' % (n_sample + 1))
                logger.debug(u'Target: %s' % reference)

                # 1. Get a first hypothesis
                trans_indices, costs, alphas = interactive_beam_searcher.sample_beam_search_interactive(
                    current_input)

                # 1.2 Decode hypothesis
                hypothesis = decode_predictions_beam_search([trans_indices],
                                                            index2word_y,
                                                            pad_sequences=True,
                                                            verbose=0)[0]
                # 1.3 Store result (optional)
                hypothesis = params_prediction['detokenize_f'](hypothesis) \
                    if params_prediction.get('apply_detokenization', False) else hypothesis
                if args.original_dest is not None:
                    if params['SAMPLING_SAVE_MODE'] == 'list':
                        list2file(args.original_dest, [hypothesis],
                                  permission='a')
                    else:
                        raise Exception(
                            'Only "list" is allowed in "SAMPLING_SAVE_MODE"')
                logger.debug(u'Hypo_%d: %s' % (hypothesis_number, hypothesis))

                # 2.0 Interactive translation
                if hypothesis in tokenized_references:
                    # 2.1 If the sentence is correct, we  validate it
                    pass
                else:
                    # 2.2 Wrong hypothesis -> Interactively translate the sentence
                    correct_hypothesis = False
                    last_correct_pos = 0
                    while not correct_hypothesis:
                        # 2.2.1 Empty data structures for the next sentence
                        fixed_words_user = OrderedDict()
                        unk_words_dict = OrderedDict()
                        isle_indices = []
                        unks_in_isles = []

                        if args.prefix:
                            # 2.2.2 Compute longest common character prefix (LCCP)
                            reference_idx, next_correction_pos, validated_prefix = common_prefixes(
                                hypothesis, tokenized_references)
                        else:
                            # 2.2.2 Compute common character segments
                            # TODO: segment-based (non-prefix) interaction is not implemented here;
                            # note that reference_idx is only set in the prefix branch above.
                            next_correction_pos, validated_prefix, validated_segments = common_segments(
                                hypothesis, reference)
                        reference = tokenized_references[reference_idx]
                        if next_correction_pos == len(reference):
                            correct_hypothesis = True
                            break
                        # 2.2.3 Get next correction by checking against the reference
                        next_correction = reference[next_correction_pos]

                        # 2.2.4 Tokenize the prefix properly (possibly applying BPE)
                        tokenized_validated_prefix = tokenize_f(
                            validated_prefix + next_correction)

                        # 2.2.5 Validate words
                        for pos, word in enumerate(
                                tokenized_validated_prefix.split()):
                            fixed_words_user[pos] = word2index_y.get(
                                word, unk_id)
                            if word2index_y.get(word) is None:
                                unk_words_dict[pos] = word

                        # 2.2.6 Constrain search for the last word
                        last_user_word_pos = list(fixed_words_user.keys())[-1]
                        if next_correction != u' ':
                            last_user_word = tokenized_validated_prefix.split()[-1]
                            filtered_idx2word = dict(
                                (word2index_y[candidate_word], candidate_word)
                                for candidate_word in word2index_y
                                if candidate_word[:len(last_user_word)] ==
                                last_user_word)
                            if filtered_idx2word != dict():
                                del fixed_words_user[last_user_word_pos]
                                if last_user_word_pos in unk_words_dict.keys():
                                    del unk_words_dict[last_user_word_pos]
                        else:
                            filtered_idx2word = dict()

                        logger.debug(u'"%s" to character %d.' %
                                     (next_correction, next_correction_pos))

                        # 2.2.7 Generate a hypothesis compatible with the feedback provided by the user
                        hypothesis = generate_constrained_hypothesis(
                            interactive_beam_searcher, current_input,
                            fixed_words_user, params_prediction, args,
                            isle_indices, filtered_idx2word, index2word_y,
                            None, None, None, unk_words_dict.keys(),
                            unk_words_dict.values(), unks_in_isles)
                        hypothesis_number += 1
                        hypothesis = u' '.join(
                            hypothesis)  # Hypothesis is unicode
                        hypothesis = params_prediction['detokenize_f'](hypothesis) \
                            if args.detokenize_bpe else hypothesis
                        logger.debug(u'Target: %s' % reference)
                        logger.debug(u"Hypo_%d: %s" %
                                     (hypothesis_number, hypothesis))
                        # 2.2.8 Add a keystroke
                        errors_sentence += 1
                        # 2.2.9 Add a mouse action if we moved the pointer
                        if next_correction_pos - last_correct_pos > 1:
                            mouse_actions_sentence += 1
                        last_correct_pos = next_correction_pos

                    # 2.3 Final check: The reference is a subset of the hypothesis: Cut the hypothesis
                    if len(reference) < len(hypothesis):
                        hypothesis = hypothesis[:len(reference)]
                        errors_sentence += 1
                        logger.debug(u"Cutting hypothesis")

                # 2.4 Security assertion
                assert hypothesis in references, "Error: The final hypothesis does not match with the reference! \n" \
                                                "\t Split: %s \n" \
                                                "\t Sentence: %d \n" \
                                                "\t Hypothesis: %s\n" \
                                                "\t Reference: %s" % (s, n_sample + 1,
                                                                      hypothesis,
                                                                      reference)
                # 3. Update user effort counters
                mouse_actions_sentence += 1  # This +1 is the validation action
                chars_sentence = len(hypothesis)
                total_errors += errors_sentence
                total_words += len(hypothesis.split())
                total_chars += chars_sentence
                total_mouse_actions += mouse_actions_sentence

                # 3.1 Log some info
                logger.debug(u"Final hypotesis: %s" % hypothesis)
                logger.debug(
                    u"%d errors. "
                    u"Sentence WSR: %4f. "
                    u"Sentence mouse strokes: %d "
                    u"Sentence MAR: %4f. "
                    u"Sentence MAR_c: %4f. "
                    u"Sentence KSMR: %4f. "
                    u"Accumulated (should only be considered for debugging purposes!) "
                    u"WSR: %4f. "
                    u"MAR: %4f. "
                    u"MAR_c: %4f. "
                    u"KSMR: %4f.\n\n\n\n" %
                    (errors_sentence, float(errors_sentence) / len(hypothesis),
                     mouse_actions_sentence,
                     float(mouse_actions_sentence) / len(hypothesis),
                     float(mouse_actions_sentence) / chars_sentence,
                     float(errors_sentence + mouse_actions_sentence) /
                     chars_sentence, float(total_errors) / total_words,
                     float(total_mouse_actions) / total_words,
                     float(total_mouse_actions) / total_chars,
                     float(total_errors + total_mouse_actions) / total_chars))
                # 4. If we are performing OL after each correct sample:
                if args.online:
                    # 4.1 Compute model inputs
                    # 4.1.1 Source text -> Already computed (used for the INMT process)
                    # 4.1.2 State below
                    state_below = dataset.loadText(
                        [reference],
                        vocabularies=dataset.vocabulary[
                            params['OUTPUTS_IDS_DATASET'][0]],
                        max_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                        offset=1,
                        fill=dataset.fill_text[params['INPUTS_IDS_DATASET']
                                               [-1]],
                        pad_on_batch=dataset.pad_on_batch[
                            params['INPUTS_IDS_DATASET'][-1]],
                        words_so_far=False,
                        loading_X=True)[0]

                    # 4.1.3 Ground truth sample -> Interactively translated sentence
                    trg_seq = dataset.loadTextOneHot(
                        [reference],
                        vocabularies=dataset.vocabulary[
                            params['OUTPUTS_IDS_DATASET'][0]],
                        vocabulary_len=dataset.vocabulary_len[
                            params['OUTPUTS_IDS_DATASET'][0]],
                        max_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                        offset=0,
                        fill=dataset.fill_text[params['OUTPUTS_IDS_DATASET']
                                               [0]],
                        pad_on_batch=dataset.pad_on_batch[
                            params['OUTPUTS_IDS_DATASET'][0]],
                        words_so_far=False,
                        sample_weights=params['SAMPLE_WEIGHTS'],
                        loading_X=False)
                    # 4.2 Train online!
                    online_trainer.train_online(
                        [np.asarray([current_input]), state_below],
                        trg_seq,
                        trg_words=[reference])
                # 5 Write correct sentences into a file
                list2file(args.dest, [hypothesis], permission='a')

                if (n_sample + 1) % 50 == 0:
                    logger.info(u"%d sentences processed" % (n_sample + 1))
                    logger.info(u"Current speed is {} per sentence".format(
                        (time.time() - start_time) / (n_sample + 1)))
                    logger.info(u"Current WSR is: %f" %
                                (float(total_errors) / total_words))
                    logger.info(u"Current MAR is: %f" %
                                (float(total_mouse_actions) / total_words))
                    logger.info(u"Current MAR_c is: %f" %
                                (float(total_mouse_actions) / total_chars))
                    logger.info(u"Current KSMR is: %f" %
                                (float(total_errors + total_mouse_actions) /
                                 total_chars))
        # 6. Final!
        # 6.1 Log some information
        print(u"Total number of errors:", total_errors)
        print(u"Total number selections", total_mouse_actions)
        print(u"WSR: %f" % (float(total_errors) / total_words))
        print(u"MAR: %f" % (float(total_mouse_actions) / total_words))
        print(u"MAR_c: %f" % (float(total_mouse_actions) / total_chars))
        print(u"KSMR: %f" %
              (float(total_errors + total_mouse_actions) / total_chars))

    except KeyboardInterrupt:
        print(u'Interrupted!')
        print(u"Total number of corrections (up to now):", total_errors)
        print(u"WSR: %f" % (float(total_errors) / total_words))
        print(u"MAR: %f" % (float(total_mouse_actions) / total_words))
        print(u"MAR_c: %f" % (float(total_mouse_actions) / total_chars))
        print(u"KSMR: %f" %
              (float(total_errors + total_mouse_actions) / total_chars))
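
# A small illustrative helper (an assumption, not part of the original script) collecting
# the user-effort metrics reported above: WSR (word-stroke ratio), MAR (mouse-action ratio
# per word), MAR_c (mouse-action ratio per character) and KSMR (keystrokes plus mouse
# actions per character).
def effort_metrics(total_errors, total_mouse_actions, total_words, total_chars):
    return {
        'WSR': float(total_errors) / total_words,
        'MAR': float(total_mouse_actions) / total_words,
        'MAR_c': float(total_mouse_actions) / total_chars,
        'KSMR': float(total_errors + total_mouse_actions) / total_chars,
    }
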
Example No. 16
    def generate_sample(self, source_sentence, validated_prefix=None, max_N=5, isle_indices=None,
                        filtered_idx2word=None, unk_indices=None, unk_words=None):
        print ("In params prediction beam_size: ", self.params_prediction['beam_size'])
        logger.log(2, 'Beam size: %d' % (self.params_prediction['beam_size']))
        generate_sample_start_time = time.time()
        if unk_indices is None:
            unk_indices = []
        if unk_words is None:
            unk_words = []

        tokenization_start_time = time.time()
        tokenized_input = self.general_tokenize_f(source_sentence, escape=False)
        tokenized_input = self.model_tokenize_f(tokenized_input)
        tokenization_end_time = time.time()
        logger.log(2, 'tokenization time: %.6f' % (tokenization_end_time - tokenization_start_time))
        parse_input_start_time = time.time()
        src_seq, src_words = parse_input(tokenized_input, self.dataset, self.word2index_x)
        parse_input_end_time = time.time()
        logger.log(2, 'parse_input time: %.6f' % (parse_input_end_time - parse_input_start_time))

        fixed_words_user = OrderedDict()
        unk_words_dict = OrderedDict()
        # If the user provided some feedback...
        if validated_prefix is not None:
            next_correction = validated_prefix[-1]
            if next_correction == self.eos_symbol:
                return validated_prefix[:-1].decode('utf-8')

            # 2.2.4 Tokenize the prefix properly (possibly applying BPE)
            #  TODO: Here we are tokenizing the target language with the source language tokenizer
            prefix_tokenization_start_time = time.time()
            tokenized_validated_prefix = self.general_tokenize_f(validated_prefix, escape=False)
            tokenized_validated_prefix = self.model_tokenize_f(tokenized_validated_prefix)
            prefix_tokenization_end_time = time.time()
            logger.log(2, 'prefix_tokenization time: %.6f' % (prefix_tokenization_end_time - prefix_tokenization_start_time))

            # 2.2.5 Validate words
            word_validation_start_time = time.time()
            for pos, word in enumerate(tokenized_validated_prefix.split()):
                fixed_words_user[pos] = self.word2index_y.get(word, self.unk_id)
                if self.word2index_y.get(word) is None:
                    unk_words_dict[pos] = word
            word_validation_end_time = time.time()
            logger.log(2, 'word_validation time: %.6f' % (word_validation_end_time - word_validation_start_time))

            # 2.2.6 Constrain search for the last word
            constrain_search_start_time = time.time()
            last_user_word_pos = list(fixed_words_user.keys())[-1]
            if next_correction != u' ':
                last_user_word = tokenized_validated_prefix.split()[-1]
                filtered_idx2word = dict((self.word2index_y[candidate_word], candidate_word)
                                         for candidate_word in self.word2index_y if candidate_word[:len(last_user_word)] == last_user_word)

                # if candidate_word.decode('utf-8')[:len(last_user_word)] == last_user_word)
                if filtered_idx2word != dict():
                    del fixed_words_user[last_user_word_pos]
                    if last_user_word_pos in list(unk_words_dict.keys()):
                        del unk_words_dict[last_user_word_pos]
            else:
                filtered_idx2word = dict()
            constrain_search_end_time = time.time()
            logger.log(2, 'constrain_search_end_time time: %.6f' % (constrain_search_end_time - constrain_search_start_time))

        sample_beam_search_start_time = time.time()
        trans_indices, costs, alphas = \
            self.interactive_beam_searcher.sample_beam_search_interactive(src_seq,
                                                                          fixed_words=copy.copy(fixed_words_user),
                                                                          max_N=max_N,
                                                                          isles=isle_indices,
                                                                          valid_next_words=filtered_idx2word,
                                                                          idx2word=self.index2word_y)
        sample_beam_search_end_time = time.time()
        logger.log(2, 'sample_beam_search time: %.6f' % (sample_beam_search_end_time - sample_beam_search_start_time))

        # # Substitute possible unknown words in isles
        # unk_in_isles = []
        # for isle_idx, isle_sequence, isle_words in unks_in_isles:
        #     if unk_id in isle_sequence:
        #         unk_in_isles.append((subfinder(isle_sequence, list(trans_indices)), isle_words))

        if False and self.params_prediction['pos_unk']:
            alphas = [alphas]
            sources = [tokenized_input]
            heuristic = self.params_prediction['heuristic']
        else:
            alphas = None
            heuristic = None
            sources = None

        # 1.2 Decode hypothesis
        decoding_predictions_start_time = time.time()
        hypothesis = decode_predictions_beam_search([trans_indices],
                                                    self.index2word_y,
                                                    alphas=alphas,
                                                    x_text=sources,
                                                    heuristic=heuristic,
                                                    mapping=self.mapping,
                                                    pad_sequences=True,
                                                    verbose=0)[0]
        decoding_predictions_end_time = time.time()
        logger.log(2, 'decoding_predictions time: %.6f' % (decoding_predictions_end_time - decoding_predictions_start_time))

        # for (words_idx, starting_pos), words in unk_in_isles:
        #     for pos_unk_word, pos_hypothesis in enumerate(range(starting_pos, starting_pos + len(words_idx))):
        #         hypothesis[pos_hypothesis] = words[pos_unk_word]

        # UNK words management
        unk_management_start_time = time.time()
        unk_indices = list(unk_words_dict)
        unk_words = list(unk_words_dict.values())
        if len(unk_indices) > 0:  # If we added some UNK word
            hypothesis = hypothesis.split()
            if len(hypothesis) < len(unk_indices):  # The full hypothesis will be made up of UNK words
                for i, index in enumerate(range(0, len(hypothesis))):
                    hypothesis[index] = unk_words[unk_indices[i]]
                for ii in range(i + 1, len(unk_words)):
                    hypothesis.append(unk_words[ii])
            else:  # We put each unknown word in the corresponding gap
                for i, index in enumerate(unk_indices):
                    if index < len(hypothesis):
                        hypothesis[index] = unk_words[i]
                    else:
                        hypothesis.append(unk_words[i])
            hypothesis = u' '.join(hypothesis)
        unk_management_end_time = time.time()
        logger.log(2, 'unk_management time: %.6f' % (unk_management_end_time - unk_management_start_time))

        hypothesis_detokenization_start_time = time.time()
        hypothesis = self.model_detokenize_f(hypothesis)
        hypothesis = self.general_detokenize_f(hypothesis, unescape=False)
        hypothesis_detokenization_end_time = time.time()
        logger.log(2, 'hypothesis_detokenization time: %.6f' % (hypothesis_detokenization_end_time - hypothesis_detokenization_start_time))
        generate_sample_end_time = time.time()
        logger.log(2, 'generate_sample time: %.6f' % (generate_sample_end_time - generate_sample_start_time))
        return hypothesis
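
# Standalone sketch (illustrative only; name and signature are assumptions) of the
# vocabulary filtering used above to constrain the search when the last user word may be
# incomplete: keep only the vocabulary entries whose surface form starts with the typed
# prefix, mapping word index -> word as the searcher expects.
def filter_vocabulary_by_prefix(word2index, typed_prefix):
    return dict((index, word)
                for word, index in word2index.items()
                if word[:len(typed_prefix)] == typed_prefix)
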
Example No. 17
            samples = predictions[0]
            alphas = predictions[1]
            sources = [
                x.strip() for x in open(args.text, 'r').read().split('\n')
            ]
            sources = sources[:-1] if len(sources[-1]) == 0 else sources
        else:
            samples = predictions
            alphas = None
            heuristic = None
            sources = None

        predictions = decode_predictions_beam_search(samples,
                                                     index2word_y,
                                                     alphas=alphas,
                                                     x_text=sources,
                                                     heuristic=heuristic,
                                                     mapping=mapping,
                                                     verbose=args.verbose)
        # Apply detokenization function if needed
        if params.get('APPLY_DETOKENIZATION', False):
            predictions = list(map(detokenize_function, predictions))

        if args.n_best:
            n_best_predictions = []
            i = 0
            for i, (n_best_preds, n_best_scores,
                    n_best_alphas) in enumerate(n_best):
                n_best_sample_score = []
                for n_best_pred, n_best_score, n_best_alpha in zip(
                        n_best_preds, n_best_scores, n_best_alphas):
def interactive_translation(
    src_seq,
    src_line,
    trg_line,
    params_prediction,
    args,
    tokenize_f,
    index2word_y,
    word2index_y,
    index2word_x,
    word2index_x,
    unk_id,
    total_errors,
    total_mouse_actions,
    n_line=-1,
):
    errors_sentence = 0
    mouse_actions_sentence = 0
    hypothesis_number = 0
    # Get (tokenized) input
    tokenized_reference = tokenize_f(
        trg_line) if args.tokenize_references else trg_line

    # Get reference as desired by the user, i.e. detokenized if necessary
    reference = params_prediction['detokenize_f'](tokenized_reference) if \
        args.detokenize_bpe else tokenized_reference

    # Detokenize line for nicer logging :)
    if args.detokenize_bpe:
        src_line = params_prediction['detokenize_f'](src_line)

    logger.debug(u'\n\nProcessing sentence %d' % n_line)
    logger.debug(u'Source: %s' % src_line)
    logger.debug(u'Target: %s' % reference)

    # 1. Get a first hypothesis
    trans_indices, costs, alphas = interactive_beam_searcher.sample_beam_search_interactive(
        src_seq)
    # 1.1 Set unk replacemet strategy
    if params_prediction['pos_unk']:
        alphas = [alphas]
        sources = [tokenized_input]
        heuristic = params_prediction['heuristic']
    else:
        alphas = None
        heuristic = None
        sources = None

    # 1.2 Decode hypothesis
    hypothesis = decode_predictions_beam_search([trans_indices],
                                                index2word_y,
                                                alphas=alphas,
                                                x_text=sources,
                                                heuristic=heuristic,
                                                mapping=mapping,
                                                pad_sequences=True,
                                                verbose=0)[0]
    # 1.3 Store result (optional)
    hypothesis = params_prediction['detokenize_f'](hypothesis) \
        if params_prediction.get('apply_detokenization', False) else hypothesis
    if args.original_dest is not None:
        filepath = args.original_dest  # results file
        if params_prediction['SAMPLING_SAVE_MODE'] == 'list':
            list2file(filepath, [hypothesis + '\n'], permission='a')
        else:
            raise Exception('Only "list" is allowed in "SAMPLING_SAVE_MODE"')
    logger.debug(u'Hypo_%d: %s' % (hypothesis_number, hypothesis))
    # 2.0 Interactive translation
    if hypothesis == reference:
        # 2.1 If the sentence is correct, we  validate it
        pass
    else:
        # 2.2 Wrong hypothesis -> Interactively translate the sentence
        correct_hypothesis = False
        last_correct_pos = 0
        while not correct_hypothesis:
            # 2.2.1 Empty data structures for the next sentence
            fixed_words_user = OrderedDict()
            unk_words_dict = OrderedDict()
            if not args.prefix:
                raise NotImplementedError('Segment-based interaction at'
                                          ' character level is still unimplemented')
            else:
                isle_indices = []
                unks_in_isles = []

            # 2.2.2 Compute longest common character prefix (LCCP)
            next_correction_pos, validated_prefix = common_prefix(
                hypothesis, reference)
            if next_correction_pos == len(reference):
                correct_hypothesis = True
                break
            # 2.2.3 Get next correction by checking against the reference
            next_correction = reference[next_correction_pos]
            # 2.2.4 Tokenize the prefix properly (possibly applying BPE)
            tokenized_validated_prefix = tokenize_f(validated_prefix +
                                                    next_correction)

            # 2.2.5 Validate words
            for pos, word in enumerate(tokenized_validated_prefix.split()):
                fixed_words_user[pos] = word2index_y.get(word, unk_id)
                if word2index_y.get(word) is None:
                    unk_words_dict[pos] = word

            # 2.2.6 Constrain search for the last word
            last_user_word_pos = list(fixed_words_user.keys())[-1]
            if next_correction != u' ':
                last_user_word = tokenized_validated_prefix.split()[-1]
                filtered_idx2word = dict(
                    (word2index_y[candidate_word], candidate_word)
                    for candidate_word in word2index_y
                    if candidate_word[:len(last_user_word)] == last_user_word)
                if filtered_idx2word != dict():
                    del fixed_words_user[last_user_word_pos]
                    if last_user_word_pos in unk_words_dict.keys():
                        del unk_words_dict[last_user_word_pos]
            else:
                filtered_idx2word = dict()

            logger.debug(u'"%s" to character %d.' %
                         (next_correction, next_correction_pos))

            # 2.2.7 Generate a hypothesis compatible with the feedback provided by the user
            hypothesis = generate_constrained_hypothesis(
                interactive_beam_searcher, src_seq, fixed_words_user,
                params_prediction, args, isle_indices,
                filtered_idx2word, index2word_y, sources, heuristic, mapping,
                unk_words_dict.keys(), unk_words_dict.values(), unks_in_isles)
            hypothesis_number += 1
            hypothesis = u' '.join(hypothesis)  # Hypothesis is unicode
            hypothesis = params_prediction['detokenize_f'](hypothesis) \
                if args.detokenize_bpe else hypothesis
            logger.debug(u'Target: %s' % reference)
            logger.debug(u"Hypo_%d: %s" % (hypothesis_number, hypothesis))
            # 2.2.8 Add a keystroke
            errors_sentence += 1
            # 2.2.9 Add a mouse action if we moved the pointer
            if next_correction_pos - last_correct_pos > 1:
                mouse_actions_sentence += 1
            last_correct_pos = next_correction_pos

        # 2.3 Final check: The reference is a subset of the hypothesis: Cut the hypothesis
        if len(reference) < len(hypothesis):
            hypothesis = hypothesis[:len(reference)]
            errors_sentence += 1
            logger.debug("Cutting hypothesis")

    # 2.4 Security assertion
    assert hypothesis == reference, "Error: The final hypothesis does not match with the reference! \n" \
                                    "\t Sentence: %d \n" \
                                    "\t Hypothesis: %s\n" \
                                    "\t Reference: %s" % (n_line, hypothesis, reference)

    # 3. Update user effort counters
    mouse_actions_sentence += 1  # This +1 is the validation action
    chars_sentence = len(hypothesis)
    total_errors += errors_sentence
    total_mouse_actions += mouse_actions_sentence

    # 3.1 Log some info
    logger.debug(u"Final hypotesis: %s" % hypothesis)
    logger.debug(
        "%d errors. "
        "Sentence WSR: %4f. "
        "Sentence mouse strokes: %d "
        "Sentence MAR: %4f. "
        "Sentence MAR_c: %4f. "
        "Sentence KSMR: %4f. " %
        (errors_sentence, float(errors_sentence) / len(hypothesis),
         mouse_actions_sentence, float(mouse_actions_sentence) /
         len(hypothesis), float(mouse_actions_sentence) / chars_sentence,
         float(errors_sentence + mouse_actions_sentence) / chars_sentence))

    # 5 Write correct sentences into a file
    return hypothesis, total_errors, total_mouse_actions
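
# A minimal sketch of what the common_prefix helper used above might look like (the real
# implementation lives elsewhere in the project; this is only an assumption consistent with
# how it is called): it returns the position of the first character where the hypothesis
# diverges from the reference, together with the matching character prefix.
def common_prefix_sketch(hypothesis, reference):
    pos = 0
    while pos < len(hypothesis) and pos < len(reference) and hypothesis[pos] == reference[pos]:
        pos += 1
    return pos, reference[:pos]
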
Example No. 19
    def on_batch_end(self, n_update, logs={}):
        self.cum_update += 1
        if self.epoch_count + self.reload_epoch < self.start_sampling_on_epoch:
            return
        elif self.cum_update % self.each_n_updates != 0:
            return

        # Evaluate on each set separately
        for s in self.set_name:
            # Apply model predictions
            params_prediction = {
                'batch_size': self.batch_size,
                'n_parallel_loaders': self.extra_vars['n_parallel_loaders'],
                'predict_on_sets': [s],
                'n_samples': self.n_samples,
                'pos_unk': False,
                'heuristic': 0,
                'mapping': None
            }
            if self.beam_search:
                params_prediction.update(
                    checkDefaultParamsBeamSearch(self.extra_vars))
                predictions, truths, sources = self.model_to_eval.predictBeamSearchNet(
                    self.ds, params_prediction)
            else:
                # Convert predictions
                postprocess_fun = None
                if self.is_3DLabel:
                    postprocess_fun = [
                        self.ds.convert_3DLabels_to_bboxes,
                        self.extra_vars[s]['references_orig_sizes']
                    ]
                predictions = self.model_to_eval.predictNet(
                    self.ds,
                    params_prediction,
                    postprocess_fun=postprocess_fun)

            if self.print_sources:
                if self.in_pred_idx is not None:
                    sources = [srcs for srcs in sources[0][self.in_pred_idx]]
                sources = decode_predictions_beam_search(sources,
                                                         self.index2word_x,
                                                         pad_sequences=True,
                                                         verbose=self.verbose)

            if s in predictions:
                if params_prediction['pos_unk']:
                    samples = predictions[s][0]
                    alphas = predictions[s][1]
                    heuristic = params_prediction['heuristic']
                else:
                    samples = predictions[s]
                    alphas = None
                    heuristic = None

                predictions = predictions[s]
                if self.is_text:
                    if self.out_pred_idx is not None:
                        samples = samples[self.out_pred_idx]
                    # Convert predictions into sentences
                    if self.beam_search:
                        predictions = decode_predictions_beam_search(
                            samples,
                            self.index2word_y,
                            alphas=alphas,
                            x_text=sources,
                            heuristic=heuristic,
                            mapping=params_prediction['mapping'],
                            verbose=self.verbose)
                    else:
                        predictions = decode_predictions(samples,
                                                         1,
                                                         self.index2word_y,
                                                         self.sampling_type,
                                                         verbose=self.verbose)
                    truths = decode_predictions_one_hot(truths,
                                                        self.index2word_y,
                                                        verbose=self.verbose)

                # Write samples
                if self.print_sources:
                    # Write samples
                    for i, (source, sample, truth) in enumerate(
                            zip(sources, predictions, truths)):
                        print("Source     (%d): %s" % (i, source))
                        print("Hypothesis (%d): %s" % (i, sample))
                        print("Reference  (%d): %s" % (i, truth))
                        print("")
                else:
                    for i, (sample,
                            truth) in enumerate(zip(predictions, truths)):
                        print("Hypothesis (%d): %s" % (i, sample))
                        print("Reference  (%d): %s" % (i, truth))
                        print("")
Example No. 20
def sample_ensemble(args, params):
    """
    Use several translation models for obtaining predictions from a source text file.

    :param argparse.Namespace args: Arguments given to the method:

                      * dataset: Dataset instance with data.
                      * text: Text file with source sentences.
                      * splits: Splits to sample. Should be already included in the dataset object.
                      * dest: Output file to save scores.
                      * weights: Weight given to each model in the ensemble. You should provide the same number of weights as models. By default, the same weight (1/N) is applied to each model.
                      * n_best: Write n-best list (n = beam size).
                      * config: Config .pkl for loading the model configuration. If not specified, hyperparameters are read from config.py.
                      * models: Path to the models.
                      * verbose: Be verbose or not.

    :param params: parameters of the translation model.
    """
    from data_engine.prepare_data import update_dataset_from_file
    from keras_wrapper.model_ensemble import BeamSearchEnsemble
    from keras_wrapper.cnn_model import loadModel
    from keras_wrapper.dataset import loadDataset
    from keras_wrapper.utils import decode_predictions_beam_search

    logger.info("Using an ensemble of %d models" % len(args.models))
    models = [loadModel(m, -1, full_path=True) for m in args.models]
    dataset = loadDataset(args.dataset)
    dataset = update_dataset_from_file(dataset, args.text, params, splits=args.splits, remove_outputs=True)

    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]
    # For converting predictions into sentences
    index2word_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['idx2words']

    if params.get('APPLY_DETOKENIZATION', False):
        detokenize_function = eval('dataset.' + params['DETOKENIZATION_METHOD'])

    params_prediction = dict()
    params_prediction['max_batch_size'] = params.get('BATCH_SIZE', 20)
    params_prediction['n_parallel_loaders'] = params.get('PARALLEL_LOADERS', 1)
    params_prediction['beam_size'] = params.get('BEAM_SIZE', 6)
    params_prediction['maxlen'] = params.get('MAX_OUTPUT_TEXT_LEN_TEST', 100)
    params_prediction['optimized_search'] = params['OPTIMIZED_SEARCH']
    params_prediction['model_inputs'] = params['INPUTS_IDS_MODEL']
    params_prediction['model_outputs'] = params['OUTPUTS_IDS_MODEL']
    params_prediction['dataset_inputs'] = params['INPUTS_IDS_DATASET']
    params_prediction['dataset_outputs'] = params['OUTPUTS_IDS_DATASET']
    params_prediction['search_pruning'] = params.get('SEARCH_PRUNING', False)
    params_prediction['normalize_probs'] = params.get('NORMALIZE_SAMPLING', False)
    params_prediction['alpha_factor'] = params.get('ALPHA_FACTOR', 1.0)
    params_prediction['coverage_penalty'] = params.get('COVERAGE_PENALTY', False)
    params_prediction['length_penalty'] = params.get('LENGTH_PENALTY', False)
    params_prediction['length_norm_factor'] = params.get('LENGTH_NORM_FACTOR', 0.0)
    params_prediction['coverage_norm_factor'] = params.get('COVERAGE_NORM_FACTOR', 0.0)
    params_prediction['pos_unk'] = params.get('POS_UNK', False)
    params_prediction['state_below_maxlen'] = -1 if params.get('PAD_ON_BATCH', True) \
        else params.get('MAX_OUTPUT_TEXT_LEN', 50)
    params_prediction['output_max_length_depending_on_x'] = params.get('MAXLEN_GIVEN_X', True)
    params_prediction['output_max_length_depending_on_x_factor'] = params.get('MAXLEN_GIVEN_X_FACTOR', 3)
    params_prediction['output_min_length_depending_on_x'] = params.get('MINLEN_GIVEN_X', True)
    params_prediction['output_min_length_depending_on_x_factor'] = params.get('MINLEN_GIVEN_X_FACTOR', 2)
    params_prediction['attend_on_output'] = params.get('ATTEND_ON_OUTPUT',
                                                       'transformer' in params['MODEL_TYPE'].lower())
    params_prediction['glossary'] = params.get('GLOSSARY', None)

    heuristic = params.get('HEURISTIC', 0)
    mapping = None if dataset.mapping == dict() else dataset.mapping
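    # 'heuristic' and 'mapping' above control how UNK tokens are replaced when POS_UNK is enabled.
    # The usual nmt-keras convention (an assumption here; check the decoding code for the exact
    # behaviour) is: 0 -> copy the most-attended source word, 1 -> translate it through the
    # source-target 'mapping' dictionary, 2 -> use the mapping only for lowercased source words
    # and copy otherwise.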
    model_weights = args.weights
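    # With several models, BeamSearchEnsemble combines the per-step output distributions of all
    # models before expanding the beam (a weighted combination when 'model_weights' is given);
    # with a single model this reduces to ordinary beam search.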

    if args.glossary is not None:
        glossary = pkl2dict(args.glossary)
    elif params_prediction['glossary'] is not None:
        glossary = pkl2dict(params_prediction['glossary'])
    else:
        glossary = None

    if model_weights:
        assert len(model_weights) == len(
            models), 'You should give a weight to each model. You gave %d models and %d weights.' % (
            len(models), len(model_weights))
        model_weights = list(map(float, model_weights))
        if len(model_weights) > 1:
            logger.info('Giving the following weights to each model: %s' % str(model_weights))

    for s in args.splits:
        # Apply model predictions
        params_prediction['predict_on_sets'] = [s]
        beam_searcher = BeamSearchEnsemble(models,
                                           dataset,
                                           params_prediction,
                                           model_weights=model_weights,
                                           n_best=args.n_best,
                                           verbose=args.verbose)
        predictions = beam_searcher.predictBeamSearchNet()[s]
        samples = predictions['samples']
        alphas = predictions['alphas'] if params_prediction['pos_unk'] else None

        if params_prediction['pos_unk']:
            sources = [x.strip() for x in open(args.text, 'r').read().split('\n')]
            sources = sources[:-1] if len(sources[-1]) == 0 else sources
        else:
            sources = None

        decoded_predictions = decode_predictions_beam_search(samples,
                                                             index2word_y,
                                                             glossary=glossary,
                                                             alphas=alphas,
                                                             x_text=sources,
                                                             heuristic=heuristic,
                                                             mapping=mapping,
                                                             verbose=args.verbose)
        # Apply detokenization function if needed
        if params.get('APPLY_DETOKENIZATION', False):
            decoded_predictions = list(map(detokenize_function, decoded_predictions))

        if args.n_best:
            n_best_predictions = []
            for i, (n_best_preds, n_best_scores, n_best_alphas) in enumerate(predictions['n_best']):
                n_best_sample_score = []
                for n_best_pred, n_best_score, n_best_alpha in zip(n_best_preds, n_best_scores, n_best_alphas):
                    pred = decode_predictions_beam_search([n_best_pred],
                                                          index2word_y,
                                                          glossary=glossary,
                                                          alphas=[n_best_alpha] if params_prediction[
                                                              'pos_unk'] else None,
                                                          x_text=[sources[i]] if params_prediction['pos_unk'] else None,
                                                          heuristic=heuristic,
                                                          mapping=mapping,
                                                          verbose=args.verbose)
                    # Apply detokenization function if needed
                    if params.get('APPLY_DETOKENIZATION', False):
                        pred = list(map(detokenize_function, pred))

                    n_best_sample_score.append([i, pred, n_best_score])
                n_best_predictions.append(n_best_sample_score)
        # Store result
        if args.dest is not None:
            filepath = args.dest  # results file
            if params.get('SAMPLING_SAVE_MODE', 'list') == 'list':
                list2file(filepath, decoded_predictions)
                if args.n_best:
                    nbest2file(filepath + '.nbest', n_best_predictions)
            else:
                raise Exception('Only "list" is allowed in "SAMPLING_SAVE_MODE"')
        else:
            list2stdout(decoded_predictions)
            if args.n_best:
                logger.info('Storing n-best sentences in ./' + s + '.nbest')
                nbest2file('./' + s + '.nbest', n_best_predictions)
        logger.info('Sampling finished')
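
A minimal driver for the function above (assumed here to be nmt-keras' sample_ensemble(args, params); the option names mirror the docstring, and load_parameters is assumed to come from the package's config.py):

if __name__ == '__main__':
    import argparse
    from config import load_parameters  # assumption: nmt-keras' config module

    parser = argparse.ArgumentParser('Translate a text file with an ensemble of NMT models.')
    parser.add_argument('-ds', '--dataset', required=True, help='Path to the Dataset .pkl')
    parser.add_argument('-t', '--text', required=True, help='Plain-text file to translate')
    parser.add_argument('-s', '--splits', nargs='+', default=['val'], help='Splits to translate')
    parser.add_argument('-d', '--dest', default=None, help='Output file (stdout if omitted)')
    parser.add_argument('-w', '--weights', nargs='*', default=[], help='Ensemble weights')
    parser.add_argument('-n', '--n-best', action='store_true', help='Also write an n-best list')
    parser.add_argument('-g', '--glossary', default=None, help='Glossary .pkl (optional)')
    parser.add_argument('-v', '--verbose', type=int, default=0, help='Verbosity level')
    parser.add_argument('-m', '--models', nargs='+', required=True, help='Paths to the models')
    args = parser.parse_args()

    params = load_parameters()      # hyperparameters read from config.py
    sample_ensemble(args, params)   # the function shown above
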
Exemplo n.º 21
    def generate_sample(self,
                        source_sentence,
                        validated_prefix=None,
                        max_N=5,
                        isle_indices=None,
                        filtered_idx2word=None,
                        unk_indices=None,
                        unk_words=None):
        """
        Generate a sample via constrained beam search. Options labeled with <<isles>> are untested
        and will likely require some modifications to work correctly.
        :param source_sentence: Source sentence.
        :param validated_prefix: Prefix to keep in the output.
        :param max_N: Maximum number of words to generate between validated segments. <<isles>>
        :param isle_indices: Indices of the validated segments. <<isles>>
        :param filtered_idx2word: List of candidate words to be the next one to generate (after generating fixed_words).
        :param unk_indices: Positions of the unknown words.
        :param unk_words: Unknown words.
        :return: Detokenized hypothesis for the given source sentence (and prefix, if any).
        """
        logger.log(2, 'Beam size: %d' % (self.params_prediction['beam_size']))
        generate_sample_start_time = time.time()
        if unk_indices is None:
            unk_indices = []
        if unk_words is None:
            unk_words = []

        tokenization_start_time = time.time()
        tokenized_input = self.general_tokenize_f(source_sentence,
                                                  escape=False)
        tokenized_input = self.model_tokenize_f(tokenized_input)
        tokenization_end_time = time.time()
        logger.log(
            2, 'tokenization time: %.6f' %
            (tokenization_end_time - tokenization_start_time))
        parse_input_start_time = time.time()
        # Go from text to indices
        src_seq = self.dataset.loadText(
            [tokenized_input],
            vocabularies=self.dataset.vocabulary[
                self.params['INPUTS_IDS_DATASET'][0]],
            max_len=self.params['MAX_INPUT_TEXT_LEN'],
            offset=0,
            fill=self.dataset.fill_text[self.params['INPUTS_IDS_DATASET'][0]],
            pad_on_batch=self.dataset.pad_on_batch[
                self.params['INPUTS_IDS_DATASET'][0]],
            words_so_far=False,
            loading_X=True)[0][0]
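        # src_seq is now a 1-D array of source-word indices (padded according to the dataset
        # settings), ready to be fed to the interactive beam searcher.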

        parse_input_end_time = time.time()
        logger.log(
            2, 'parse_input time: %.6f' %
            (parse_input_end_time - parse_input_start_time))

        fixed_words_user = OrderedDict()
        unk_words_dict = OrderedDict()
        # If the user provided some feedback...
        if validated_prefix is not None:
            next_correction = validated_prefix[-1]
            if next_correction == self.eos_symbol:
                return validated_prefix[:-1].decode('utf-8')

            # 2.2.4 Tokenize the prefix properly (possibly applying BPE)
            #  TODO: Here we are tokenizing the target language with the source language tokenizer
            prefix_tokenization_start_time = time.time()
            tokenized_validated_prefix = self.general_tokenize_f(
                validated_prefix, escape=False)
            tokenized_validated_prefix = self.model_tokenize_f(
                tokenized_validated_prefix)
            prefix_tokenization_end_time = time.time()
            logger.log(
                2, 'prefix_tokenization time: %.6f' %
                (prefix_tokenization_end_time -
                 prefix_tokenization_start_time))

            # 2.2.5 Validate words
            word_validation_start_time = time.time()
            for pos, word in enumerate(tokenized_validated_prefix.split()):
                fixed_words_user[pos] = self.word2index_y.get(
                    word, self.unk_id)
                if self.word2index_y.get(word) is None:
                    unk_words_dict[pos] = word
            word_validation_end_time = time.time()
            logger.log(
                2, 'word_validation time: %.6f' %
                (word_validation_end_time - word_validation_start_time))

            # 2.2.6 Constrain search for the last word
            constrain_search_start_time = time.time()
            last_user_word_pos = list(fixed_words_user.keys())[-1]
            if next_correction != u' ':
                last_user_word = tokenized_validated_prefix.split()[-1]
                filtered_idx2word = dict(
                    (self.word2index_y[candidate_word], candidate_word)
                    for candidate_word in self.word2index_y
                    if candidate_word[:len(last_user_word)] == last_user_word)
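                # filtered_idx2word restricts the beam so that the next generated token must
                # complete the word the user is still typing; if no vocabulary entry shares that
                # prefix, it stays empty and the partial word is kept as a fixed (possibly UNK)
                # token.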

                if filtered_idx2word != dict():
                    del fixed_words_user[last_user_word_pos]
                    if last_user_word_pos in list(unk_words_dict.keys()):
                        del unk_words_dict[last_user_word_pos]
            else:
                filtered_idx2word = dict()
            constrain_search_end_time = time.time()
            logger.log(
                2, 'constrain_search time: %.6f' %
                (constrain_search_end_time - constrain_search_start_time))

        sample_beam_search_start_time = time.time()
        trans_indices, costs, alphas = \
            self.interactive_beam_searcher.sample_beam_search_interactive(src_seq,
                                                                          fixed_words=copy.copy(fixed_words_user),
                                                                          max_N=max_N,
                                                                          isles=isle_indices,
                                                                          valid_next_words=filtered_idx2word,
                                                                          idx2word=self.index2word_y)
        sample_beam_search_end_time = time.time()
        logger.log(
            2, 'sample_beam_search time: %.6f' %
            (sample_beam_search_end_time - sample_beam_search_start_time))

        # Note: the 'False and' short-circuit below effectively disables pos_unk (UNK replacement)
        # handling in this method, so alphas, heuristic and sources are always reset to None.
        if False and self.params_prediction['pos_unk']:
            alphas = [alphas]
            sources = [tokenized_input]
            heuristic = self.params_prediction['heuristic']
        else:
            alphas = None
            heuristic = None
            sources = None

        # 1.2 Decode hypothesis
        decoding_predictions_start_time = time.time()
        hypothesis = decode_predictions_beam_search([trans_indices],
                                                    self.index2word_y,
                                                    alphas=alphas,
                                                    x_text=sources,
                                                    heuristic=heuristic,
                                                    mapping=self.mapping,
                                                    pad_sequences=True,
                                                    verbose=0)[0]
        decoding_predictions_end_time = time.time()
        logger.log(
            2, 'decoding_predictions time: %.6f' %
            (decoding_predictions_end_time - decoding_predictions_start_time))

        # UNK words management
        unk_management_start_time = time.time()
        unk_indices = list(unk_words_dict)
        unk_words = list(unk_words_dict.values())
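        # Out-of-vocabulary words typed by the user were mapped to UNK above; here they are
        # written back verbatim into their positions of the decoded hypothesis.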
        if len(unk_indices) > 0:  # If we added some UNK word
            hypothesis = hypothesis.split()
            if len(hypothesis) < len(
                    unk_indices
            ):  # The full hypothesis will be made up of UNK words
                for i, index in enumerate(range(0, len(hypothesis))):
                    hypothesis[index] = unk_words[unk_indices[i]]
                for ii in range(i + 1, len(unk_words)):
                    hypothesis.append(unk_words[ii])
            else:  # We put each unknown word in the corresponding gap
                for i, index in enumerate(unk_indices):
                    if index < len(hypothesis):
                        hypothesis[index] = unk_words[i]
                    else:
                        hypothesis.append(unk_words[i])
            hypothesis = u' '.join(hypothesis)
        unk_management_end_time = time.time()
        logger.log(
            2, 'unk_management time: %.6f' %
            (unk_management_end_time - unk_management_start_time))

        hypothesis_detokenization_start_time = time.time()
        hypothesis = self.model_detokenize_f(hypothesis)
        hypothesis = self.general_detokenize_f(hypothesis, unescape=False)
        hypothesis_detokenization_end_time = time.time()
        logger.log(
            2, 'hypothesis_detokenization time: %.6f' %
            (hypothesis_detokenization_end_time -
             hypothesis_detokenization_start_time))
        generate_sample_end_time = time.time()
        logger.log(
            2, 'generate_sample time: %.6f' %
            (generate_sample_end_time - generate_sample_start_time))
        return hypothesis
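
A sketch of how generate_sample could drive a prefix-based interactive session, assuming `sampler` is an already-initialised instance of the class above (the function name and prompts are illustrative):

def interactive_loop(sampler, source_sentence):
    # 'sampler' is assumed to expose the generate_sample() method defined above.
    hypothesis = sampler.generate_sample(source_sentence)
    print('MT hypothesis:', hypothesis)
    while True:
        prefix = input('Validated prefix (empty line to accept): ')
        if not prefix:
            return hypothesis
        # The system keeps the validated prefix and regenerates the suffix.
        hypothesis = sampler.generate_sample(source_sentence, validated_prefix=prefix)
        print('New hypothesis:', hypothesis)
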
Exemplo n.º 22
    def evaluate(self, epoch, counter_name='epoch', logs=None):
        if logs is None:
            logs = {}
        # Change inputs and outputs mappings for evaluation
        self.changeInOutMappings()

        # Evaluate on each set separately
        all_metrics = []

        for s in self.set_name:
            # Apply model predictions
            if self.beam_search:
                params_prediction = {'max_batch_size': self.batch_size,
                                     'n_parallel_loaders': self.extra_vars[
                                         'n_parallel_loaders'],
                                     'predict_on_sets': [s],
                                     'beam_batch_size': self.beam_batch_size if
                                     self.beam_batch_size is not None else self.batch_size,
                                     'pos_unk': False,
                                     'normalize': self.normalize,
                                     'normalization_type': self.normalization_type,
                                     'max_eval_samples': self.max_eval_samples
                                     }

                params_prediction.update(checkDefaultParamsBeamSearch(self.extra_vars))
                predictions_all = self.model_to_eval.predictBeamSearchNet(self.ds, params_prediction)[s]
            else:
                orig_size = self.extra_vars.get('eval_orig_size', False)
                params_prediction = {'batch_size': self.batch_size,
                                     'n_parallel_loaders': self.extra_vars.get(
                                         'n_parallel_loaders', 8),
                                     'predict_on_sets': [s],
                                     'normalize': self.normalize,
                                     'normalization_type': self.normalization_type,
                                     'max_eval_samples': self.max_eval_samples,
                                     'model_name': self.model_name,
                                     }
                # Convert predictions
                postprocess_fun = None
                if self.is_3DLabel:
                    postprocess_fun = [self.ds.convert_3DLabels_to_bboxes,
                                       self.extra_vars[s]['references_orig_sizes']]
                elif orig_size:
                    postprocess_fun = [self.ds.resize_semantic_output,
                                       self.extra_vars[s]['eval_orig_size_id']]
                predictions_all = \
                    self.model_to_eval.predictNet(self.ds, params_prediction,
                                                  postprocess_fun=postprocess_fun)[s]

            # Single-output model
            if not self.gt_pos or self.gt_pos == 0 or len(self.gt_pos) == 1:
                if len(predictions_all) != 2:
                    predictions_all = [predictions_all]
                gt_positions = [0]

            # Multi-output model
            else:
                gt_positions = self.gt_pos

            # Select each output to evaluate separately
            for gt_pos, type, these_metrics, gt_id, write_type, index2word_y, index2word_x in zip(
                    gt_positions,
                    self.output_types,
                    self.metric_name,
                    self.gt_id,
                    self.write_type,
                    self.index2word_y,
                    self.index2word_x):

                predictions = predictions_all[gt_pos]

                if self.verbose > 0:
                    print('')
                    logging.info('Prediction output ' + str(gt_pos) + ': ' + str(
                        gt_id) + ' (' + str(type) + ')')

                # Postprocess outputs of type text
                if type == 'text':
                    if params_prediction.get('pos_unk', False):
                        samples = predictions[0]
                        alphas = predictions[1]

                        if getattr(self.ds, 'loaded_raw_' + s)[0]:
                            sources = predictions[2]
                        else:
                            sources = []
                            for preds in predictions[2]:
                                for src in preds[self.input_text_id]:
                                    sources.append(src)
                            sources = decode_predictions_beam_search(sources,
                                                                     index2word_x,
                                                                     pad_sequences=True,
                                                                     verbose=self.verbose)
                        heuristic = self.extra_vars['heuristic']
                    else:
                        samples = predictions
                        alphas = None
                        heuristic = None
                        sources = None
                    if self.out_pred_idx is not None:
                        samples = samples[self.out_pred_idx]
                    # Convert predictions into sentences
                    if self.beam_search:
                        predictions = decode_predictions_beam_search(samples,
                                                                     index2word_y,
                                                                     alphas=alphas,
                                                                     x_text=sources,
                                                                     heuristic=heuristic,
                                                                     mapping=self.extra_vars.get(
                                                                         'mapping',
                                                                         None),
                                                                     verbose=self.verbose)
                    else:
                        probs = predictions
                        predictions = decode_predictions(predictions,
                                                         1,
                                                         # always set temperature to 1
                                                         index2word_y,
                                                         self.sampling_type,
                                                         verbose=self.verbose)

                    # Apply detokenization function if needed
                    if self.extra_vars.get('apply_detokenization', False):
                        predictions = list(map(self.extra_vars['detokenize_f'],
                                               predictions))

                # Postprocess outputs of type binary
                elif type == 'binary':
                    predictions = decode_multilabel(predictions,
                                                    index2word_y,
                                                    min_val=self.min_pred_multilabel[
                                                        gt_pos],
                                                    verbose=self.verbose)

                    # Prepare references
                    # exec ("y_raw = self.ds.Y_" + s + "[gt_id]")
                    y_split = getattr(self.ds, 'Y_' + s)
                    y_raw = y_split[gt_id]
                    self.extra_vars[gt_pos][s]['references'] = self.ds.loadBinary(y_raw, gt_id)

                # Postprocess outputs of type 3DLabel
                elif type == '3DLabel':
                    self.extra_vars[gt_pos][s] = dict()
                    # exec ('ref=self.ds.Y_' + s + '["' + gt_id + '"]')
                    y_split = getattr(self.ds, 'Y_' + s)
                    ref = y_split[gt_id]
                    [ref, original_sizes] = self.ds.convert_GT_3DLabels_to_bboxes(
                        ref)
                    self.extra_vars[gt_pos][s]['references'] = ref
                    self.extra_vars[gt_pos][s]['references_orig_sizes'] = original_sizes

                # Postprocess outputs of type 3DSemanticLabel
                elif type == '3DSemanticLabel':
                    self.extra_vars[gt_pos]['eval_orig_size'] = self.eval_orig_size
                    self.extra_vars[gt_pos][s] = dict()
                    # exec ('ref=self.ds.Y_' + s + '["' + gt_id + '"]')
                    y_split = getattr(self.ds, 'Y_' + s)
                    ref = y_split[gt_id]
                    if self.eval_orig_size:
                        old_crop = copy.deepcopy(self.ds.img_size_crop)
                        self.ds.img_size_crop = copy.deepcopy(self.ds.img_size)
                        self.extra_vars[gt_pos][s]['eval_orig_size_id'] = np.array([gt_id] * len(ref))
                    ref = self.ds.load_GT_3DSemanticLabels(ref, gt_id)
                    if self.eval_orig_size:
                        self.ds.img_size_crop = copy.deepcopy(old_crop)
                    self.extra_vars[gt_pos][s]['references'] = ref

                # Other output data types
                else:
                    # exec ("self.extra_vars[gt_pos][s]['references'] = self.ds.Y_" + s + "[gt_id]")
                    y_split = getattr(self.ds, 'Y_' + s)
                    self.extra_vars[gt_pos][s]['references'] = y_split[gt_id]
                # Store predictions
                if self.write_samples:
                    # Store result
                    filepath = self.save_path + '/' + s + '_' + counter_name + '_' + str(epoch) + '_output_' + str(gt_pos) + '.pred'  # results file
                    if write_type == 'list':
                        list2file(filepath, predictions)
                    elif write_type == 'vqa':
                        try:
                            # exec ('refs = self.ds.Y_' + s + '[gt_id]')
                            y_split = getattr(self.ds, 'Y_' + s)
                            refs = y_split[gt_id]
                        except Exception:
                            refs = ['N/A' for _ in range(probs.shape[0])]
                        extra_data_plot = {'reference': refs,
                                           'probs': probs,
                                           'vocab': index2word_y}
                        list2vqa(filepath, predictions,
                                 self.extra_vars[gt_pos][s]['question_ids'],
                                 extra=extra_data_plot)
                    elif write_type == 'listoflists':
                        listoflists2file(filepath, predictions)
                    elif write_type == 'numpy':
                        numpy2file(filepath, predictions)
                    elif write_type == '3DLabels':
                        raise NotImplementedError(
                            'Write 3DLabels function is not implemented')
                    elif write_type == '3DSemanticLabel':
                        folder_path = self.save_path + '/' + s + '_' + counter_name + '_' + str(
                            epoch)  # results folder
                        numpy2imgs(folder_path,
                                   predictions,
                                   getattr(self.ds, 'X_' + s)[self.input_id],
                                   self.ds)
                    else:
                        raise NotImplementedError('The store type "' + write_type + '" is not implemented.')

                # Evaluate on each metric
                for metric in these_metrics:
                    if self.verbose > 0:
                        logging.info('Evaluating on metric ' + metric)
                    filepath = self.save_path + '/' + s + '.' + metric  # results file

                    if s == 'train':
                        logging.info(
                            "WARNING: evaluation results on 'train' split might be incorrect when"
                            "applying random image shuffling.")

                    # Evaluate on the chosen metric
                    metrics = evaluation.select[metric](
                        pred_list=predictions,
                        verbose=self.verbose,
                        extra_vars=self.extra_vars[gt_pos],
                        split=s)
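                    # 'metrics' is a dict mapping metric names (e.g. 'Bleu_4', 'TER') to their
                    # scores for this split; they are appended below to a CSV-like report file
                    # and to the model log.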

                    # Print results to file and store in model log
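                    # (The report is a simple CSV-like file: a header such as 'epoch,Bleu_4,TER,'
                    #  followed by one line of values per evaluation.)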
                    with open(filepath, 'a') as f:
                        header = counter_name + ','
                        line = str(epoch) + ','
                        # Store in model log
                        self.model_to_eval.log(s, counter_name, epoch)
                        for metric_ in sorted(metrics):
                            value = metrics[metric_]
                            # Multiple-output model
                            if self.gt_pos and self.gt_pos != 0:
                                metric_ += '_output_' + str(gt_pos)
                            all_metrics.append(metric_)
                            header += metric_ + ','
                            line += str(value) + ','
                            # Store in model log
                            self.model_to_eval.log(s, metric_, value)
                        if not self.written_header:
                            f.write(header + '\n')
                            self.written_header = True
                        f.write(line + '\n')

                    if self.verbose > 0:
                        logging.info('Done evaluating on metric ' + metric)

        # Store losses
        if logs.get('loss') is not None:
            self.model_to_eval.log('train', 'train_loss', logs['loss'])
        if logs.get('valid_loss') is not None:
            self.model_to_eval.log('val', 'val_loss', logs['valid_loss'])

        # Plot results so far
        if self.do_plot:
            if self.metric_name:
                self.model_to_eval.plot(counter_name, set(all_metrics),
                                        self.set_name, upperbound=self.max_plot)

        # Save the model
        if self.save_each_evaluation:
            from keras_wrapper.cnn_model import saveModel
            saveModel(self.model_to_eval, epoch, store_iter=not self.eval_on_epochs)

        # Recover inputs and outputs mappings for resume training
        self.recoverInOutMappings()