def load_in(encoding, direction):
    if encoding == "char":
        print(encoding)
        args = parse_args_char(direction)
    else:
        args = parse_args_bpe(direction)
    if args["config"] is None:
        logging.info("Reading parameters from config.py")
        from config import load_parameters
        params = load_parameters()
    else:
        logging.info("Loading parameters from %s" % str(args["config"]))
        params = pkl2dict(args["config"])
    try:
        for arg in args["changes"]:
            try:
                k, v = arg.split('=')
            except ValueError:
                print('Overwritten arguments must have the form key=Value. \n Currently are: %s' % str(args["changes"]))
                exit(1)
            try:
                params[k] = ast.literal_eval(v)
            except ValueError:
                params[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)
    params = check_params(params)
    model, dataset = loadmodel(encoding, args)  # this should become a function
    # sample_ensemble(args, params, models, dataset)
    return args, params, model, dataset
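# Illustrative sketch (not part of the original scripts): the key=Value override loop
# above relies on ast.literal_eval to turn "BEAM_SIZE=12" into an int and "POS_UNK=True"
# into a bool, falling back to the raw string when the value is not a Python literal.
# The parameter names below are just example keys.
import ast


def apply_overrides(params, changes):
    for change in changes:
        k, v = change.split('=')
        try:
            params[k] = ast.literal_eval(v)  # numbers, booleans, lists, ...
        except (ValueError, SyntaxError):
            params[k] = v                    # plain strings stay strings
    return params


print(apply_overrides({}, ['BEAM_SIZE=12', 'POS_UNK=True', 'SRC_LAN=en']))
# {'BEAM_SIZE': 12, 'POS_UNK': True, 'SRC_LAN': 'en'}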
def main():
    args = parse_args()
    server_address = (args.address, args.port)
    httpd = HTTPServer(server_address, NMTHandler)
    logger.setLevel(args.logging_level)
    parameters = load_parameters()
    if args.config is not None:
        logger.info("Loading parameters from %s" % str(args.config))
        parameters = update_parameters(parameters, pkl2dict(args.config))
    if args.online:
        online_parameters = load_parameters_online()
        parameters = update_parameters(parameters, online_parameters)
    try:
        for arg in args.changes:
            try:
                k, v = arg.split('=')
            except ValueError:
                print('Overwritten arguments must have the form key=Value. \n Currently are: %s' % str(args.changes))
                exit(1)
            try:
                parameters[k] = ast.literal_eval(v)
            except ValueError:
                parameters[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)

    dataset = loadDataset(args.dataset)

    # For converting predictions into sentences
    # Dataset backwards compatibility
    bpe_separator = dataset.BPE_separator if hasattr(dataset, "BPE_separator") and dataset.BPE_separator is not None else '@@'
    # Build BPE tokenizer if necessary
    if 'bpe' in parameters['TOKENIZATION_METHOD'].lower():
        logger.info('Building BPE')
        if not dataset.BPE_built:
            dataset.build_bpe(parameters.get('BPE_CODES_PATH', parameters['DATA_ROOT_PATH'] + '/training_codes.joint'),
                              separator=bpe_separator)
    # Build tokenization function
    tokenize_f = eval('dataset.' + parameters.get('TOKENIZATION_METHOD', 'tokenize_bpe'))
    detokenize_function = eval('dataset.' + parameters.get('DETOKENIZATION_METHOD', 'detokenize_bpe'))
    dataset.build_moses_tokenizer(language=parameters['SRC_LAN'])
    dataset.build_moses_detokenizer(language=parameters['TRG_LAN'])
    tokenize_general = dataset.tokenize_moses
    detokenize_general = dataset.detokenize_moses

    # Prediction parameters
    params_prediction = dict()
    params_prediction['max_batch_size'] = parameters.get('BATCH_SIZE', 20)
    params_prediction['n_parallel_loaders'] = parameters.get('PARALLEL_LOADERS', 1)
    params_prediction['beam_size'] = parameters.get('BEAM_SIZE', 6)
    params_prediction['maxlen'] = parameters.get('MAX_OUTPUT_TEXT_LEN_TEST', 100)
    params_prediction['optimized_search'] = parameters['OPTIMIZED_SEARCH']
    params_prediction['model_inputs'] = parameters['INPUTS_IDS_MODEL']
    params_prediction['model_outputs'] = parameters['OUTPUTS_IDS_MODEL']
    params_prediction['dataset_inputs'] = parameters['INPUTS_IDS_DATASET']
    params_prediction['dataset_outputs'] = parameters['OUTPUTS_IDS_DATASET']
    params_prediction['search_pruning'] = parameters.get('SEARCH_PRUNING', False)
    params_prediction['normalize_probs'] = True
    params_prediction['alpha_factor'] = parameters.get('ALPHA_FACTOR', 1.0)
    params_prediction['coverage_penalty'] = True
    params_prediction['length_penalty'] = True
    params_prediction['length_norm_factor'] = parameters.get('LENGTH_NORM_FACTOR', 0.0)
    params_prediction['coverage_norm_factor'] = parameters.get('COVERAGE_NORM_FACTOR', 0.0)
    params_prediction['pos_unk'] = parameters.get('POS_UNK', False)
    params_prediction['heuristic'] = parameters.get('HEURISTIC', 0)
    params_prediction['state_below_index'] = -1
    params_prediction['output_text_index'] = 0
    params_prediction['state_below_maxlen'] = -1 if parameters.get('PAD_ON_BATCH', True) else parameters.get('MAX_OUTPUT_TEXT_LEN', 50)
    params_prediction['output_max_length_depending_on_x'] = parameters.get('MAXLEN_GIVEN_X', True)
    params_prediction['output_max_length_depending_on_x_factor'] = parameters.get('MAXLEN_GIVEN_X_FACTOR', 3)
    params_prediction['output_min_length_depending_on_x'] = parameters.get('MINLEN_GIVEN_X', True)
    params_prediction['output_min_length_depending_on_x_factor'] = parameters.get('MINLEN_GIVEN_X_FACTOR', 2)
    params_prediction['attend_on_output'] = parameters.get('ATTEND_ON_OUTPUT', 'transformer' in parameters['MODEL_TYPE'].lower())

    # Manage pos_unk strategies
    if parameters['POS_UNK']:
        mapping = None if dataset.mapping == dict() else dataset.mapping
    else:
        mapping = None

    if 'transformer' in parameters['MODEL_TYPE'].lower():
        params_prediction['pos_unk'] = False
        params_prediction['coverage_penalty'] = False

    # Training parameters
    parameters_training = dict()
    if args.online:
        logger.info('Loading models from %s' % str(args.models))
        parameters_training = {  # Training parameters
            'n_epochs': parameters['MAX_EPOCH'],
            'shuffle': False,
            'loss': parameters.get('LOSS', 'categorical_crossentropy'),
            'batch_size': parameters.get('BATCH_SIZE', 1),
            'homogeneous_batches': False,
            'optimizer': parameters.get('OPTIMIZER', 'SGD'),
            'lr': parameters.get('LR', 0.1),
            'lr_decay': parameters.get('LR_DECAY', None),
            'lr_gamma': parameters.get('LR_GAMMA', 1.),
            'epochs_for_save': -1,
            'verbose': args.verbose,
            'eval_on_sets': parameters.get('EVAL_ON_SETS_KERAS', None),
            'n_parallel_loaders': parameters['PARALLEL_LOADERS'],
            'extra_callbacks': [],  # callbacks,
            'reload_epoch': parameters['RELOAD'],
            'epoch_offset': parameters['RELOAD'],
            'data_augmentation': parameters['DATA_AUGMENTATION'],
            'patience': parameters.get('PATIENCE', 0),
            'metric_check': parameters.get('STOP_METRIC', None),
            'eval_on_epochs': parameters.get('EVAL_EACH_EPOCHS', True),
            'each_n_epochs': parameters.get('EVAL_EACH', 1),
            'start_eval_on_epoch': parameters.get('START_EVAL_ON_EPOCH', 0),
            'additional_training_settings': {
                'k': parameters.get('K', 1),
                'tau': parameters.get('TAU', 1),
                'lambda': parameters.get('LAMBDA', 0.5),
                'c': parameters.get('C', 0.5),
                'd': parameters.get('D', 0.5)
            }
        }
        model_instances = [TranslationModel(parameters,
                                            model_type=parameters['MODEL_TYPE'],
                                            verbose=parameters['VERBOSE'],
                                            model_name=parameters['MODEL_NAME'] + '_' + str(i),
                                            vocabularies=dataset.vocabulary,
                                            store_path=parameters['STORE_PATH'],
                                            set_optimizer=False)
                           for i in range(len(args.models))]
        models = [updateModel(model, path, -1, full_path=True)
                  for (model, path) in zip(model_instances, args.models)]
    else:
        models = [loadModel(m, -1, full_path=True) for m in args.models]

    for nmt_model in models:
        nmt_model.setParams(parameters)
        nmt_model.setOptimizer()

    parameters['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[parameters['INPUTS_IDS_DATASET'][0]]
    parameters['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[parameters['OUTPUTS_IDS_DATASET'][0]]

    # Get word2index and index2word dictionaries
    index2word_y = dataset.vocabulary[parameters['OUTPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_y = dataset.vocabulary[parameters['OUTPUTS_IDS_DATASET'][0]]['words2idx']
    index2word_x = dataset.vocabulary[parameters['INPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_x = dataset.vocabulary[parameters['INPUTS_IDS_DATASET'][0]]['words2idx']

    excluded_words = None
    interactive_beam_searcher = NMTSampler(models, dataset, parameters, params_prediction, parameters_training,
                                           tokenize_f, detokenize_function, tokenize_general, detokenize_general,
                                           mapping=mapping,
                                           word2index_x=word2index_x,
                                           word2index_y=word2index_y,
                                           index2word_y=index2word_y,
                                           eos_symbol=args.eos_symbol,
                                           excluded_words=excluded_words,
                                           online=args.online,
                                           verbose=args.verbose)

    httpd.sampler = interactive_beam_searcher

    logger.info('Server starting at %s' % str(server_address))
    httpd.serve_forever()
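# Sketch (not from the original scripts) of why "httpd.sampler = interactive_beam_searcher"
# works: attributes attached to an http.server.HTTPServer instance are reachable from the
# request handler through self.server, so a handler like NMTHandler can call the sampler
# per request. The handler below is a hypothetical stand-in, using only the standard library.
from http.server import BaseHTTPRequestHandler, HTTPServer


class EchoSamplerHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # The sampler callable was attached to the server instance by main().
        result = self.server.sampler(self.path.lstrip('/'))
        self.send_response(200)
        self.end_headers()
        self.wfile.write(result.encode('utf-8'))


if __name__ == '__main__':
    demo_server = HTTPServer(('', 8000), EchoSamplerHandler)
    demo_server.sampler = lambda text: 'translated: %s' % text  # toy sampler
    # demo_server.serve_forever()  # uncomment to actually serve requests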
"--dataset", required=False, help="Dataset instance with data") parser.add_argument("changes", nargs="*", help="Changes to config. " "Following the syntax Key=Value", default="") return parser.parse_args() if __name__ == "__main__": args = parse_args() parameters = load_parameters() if args.config is not None: parameters = update_parameters(parameters, pkl2dict(args.config)) try: for arg in args.changes: try: k, v = arg.split('=') except ValueError: print( 'Overwritten arguments must have the form key=Value. \n Currently are: %s' % str(args.changes)) exit(1) try: parameters[k] = ast.literal_eval(v) except ValueError: parameters[k] = v except ValueError: print('Error processing arguments: (', k, ",", v, ")")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    models = args.models
    logging.info("Using an ensemble of %d models" % len(args.models))
    models = [loadModel(m, -1, full_path=True) for m in args.models]
    if args.config is None:
        logging.info("Reading parameters from config.py")
        from config import load_parameters
        params = load_parameters()
    else:
        logging.info("Loading parameters from %s" % str(args.config))
        params = pkl2dict(args.config)
    try:
        for arg in args.changes:
            try:
                k, v = arg.split('=')
            except ValueError:
                print('Overwritten arguments must have the form key=Value. \n Currently are: %s' % str(args.changes))
                exit(1)
            try:
                params[k] = ast.literal_eval(v)
            except ValueError:
                params[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)
def sample_ensemble(args, params):
    """
    Use several translation models for obtaining predictions from a source text file.

    :param argparse.Namespace args: Arguments given to the method:
        * dataset: Dataset instance with data.
        * text: Text file with source sentences.
        * splits: Splits to sample. Should be already included in the dataset object.
        * dest: Output file to save scores.
        * weights: Weight given to each model in the ensemble. You should provide the same number
          of weights as models. By default, it applies the same weight to each model (1/N).
        * n_best: Write n-best list (n = beam size).
        * config: Config .pkl for loading the model configuration. If not specified,
          hyperparameters are read from config.py.
        * models: Path to the models.
        * verbose: Be verbose or not.
    :param params: parameters of the translation model.
    """
    from data_engine.prepare_data import update_dataset_from_file
    from keras_wrapper.model_ensemble import BeamSearchEnsemble
    from keras_wrapper.cnn_model import loadModel
    from keras_wrapper.dataset import loadDataset
    from keras_wrapper.utils import decode_predictions_beam_search

    logger.info("Using an ensemble of %d models" % len(args.models))
    models = [loadModel(m, -1, full_path=True) for m in args.models]
    dataset = loadDataset(args.dataset)
    dataset = update_dataset_from_file(dataset, args.text, params, splits=args.splits, remove_outputs=True)
    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]

    # For converting predictions into sentences
    index2word_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['idx2words']

    if params.get('APPLY_DETOKENIZATION', False):
        detokenize_function = eval('dataset.' + params['DETOKENIZATION_METHOD'])

    params_prediction = dict()
    params_prediction['max_batch_size'] = params.get('BATCH_SIZE', 20)
    params_prediction['n_parallel_loaders'] = params.get('PARALLEL_LOADERS', 1)
    params_prediction['beam_size'] = params.get('BEAM_SIZE', 6)
    params_prediction['maxlen'] = params.get('MAX_OUTPUT_TEXT_LEN_TEST', 100)
    params_prediction['optimized_search'] = params['OPTIMIZED_SEARCH']
    params_prediction['model_inputs'] = params['INPUTS_IDS_MODEL']
    params_prediction['model_outputs'] = params['OUTPUTS_IDS_MODEL']
    params_prediction['dataset_inputs'] = params['INPUTS_IDS_DATASET']
    params_prediction['dataset_outputs'] = params['OUTPUTS_IDS_DATASET']
    params_prediction['search_pruning'] = params.get('SEARCH_PRUNING', False)
    params_prediction['normalize_probs'] = params.get('NORMALIZE_SAMPLING', False)
    params_prediction['alpha_factor'] = params.get('ALPHA_FACTOR', 1.0)
    params_prediction['coverage_penalty'] = params.get('COVERAGE_PENALTY', False)
    params_prediction['length_penalty'] = params.get('LENGTH_PENALTY', False)
    params_prediction['length_norm_factor'] = params.get('LENGTH_NORM_FACTOR', 0.0)
    params_prediction['coverage_norm_factor'] = params.get('COVERAGE_NORM_FACTOR', 0.0)
    params_prediction['pos_unk'] = params.get('POS_UNK', False)
    params_prediction['state_below_maxlen'] = -1 if params.get('PAD_ON_BATCH', True) \
        else params.get('MAX_OUTPUT_TEXT_LEN', 50)
    params_prediction['output_max_length_depending_on_x'] = params.get('MAXLEN_GIVEN_X', True)
    params_prediction['output_max_length_depending_on_x_factor'] = params.get('MAXLEN_GIVEN_X_FACTOR', 3)
    params_prediction['output_min_length_depending_on_x'] = params.get('MINLEN_GIVEN_X', True)
    params_prediction['output_min_length_depending_on_x_factor'] = params.get('MINLEN_GIVEN_X_FACTOR', 2)
    params_prediction['attend_on_output'] = params.get('ATTEND_ON_OUTPUT', 'transformer' in params['MODEL_TYPE'].lower())
    params_prediction['glossary'] = params.get('GLOSSARY', None)

    heuristic = params.get('HEURISTIC', 0)
    mapping = None if dataset.mapping == dict() else dataset.mapping
    model_weights = args.weights

    if args.glossary is not None:
        glossary = pkl2dict(args.glossary)
    elif params_prediction['glossary'] is not None:
        glossary = pkl2dict(params_prediction['glossary'])
    else:
        glossary = None

    if model_weights:
        assert len(model_weights) == len(models), \
            'You should give a weight to each model. You gave %d models and %d weights.' % (len(models), len(model_weights))
        model_weights = list(map(float, model_weights))
        if len(model_weights) > 1:
            logger.info('Giving the following weights to each model: %s' % str(model_weights))

    for s in args.splits:
        # Apply model predictions
        params_prediction['predict_on_sets'] = [s]
        beam_searcher = BeamSearchEnsemble(models, dataset, params_prediction,
                                           model_weights=model_weights,
                                           n_best=args.n_best,
                                           verbose=args.verbose)
        predictions = beam_searcher.predictBeamSearchNet()[s]
        samples = predictions['samples']
        alphas = predictions['alphas'] if params_prediction['pos_unk'] else None

        if params_prediction['pos_unk']:
            sources = [x.strip() for x in open(args.text, 'r').read().split('\n')]
            sources = sources[:-1] if len(sources[-1]) == 0 else sources
        else:
            sources = None

        decoded_predictions = decode_predictions_beam_search(samples,
                                                             index2word_y,
                                                             glossary=glossary,
                                                             alphas=alphas,
                                                             x_text=sources,
                                                             heuristic=heuristic,
                                                             mapping=mapping,
                                                             verbose=args.verbose)
        # Apply detokenization function if needed
        if params.get('APPLY_DETOKENIZATION', False):
            decoded_predictions = list(map(detokenize_function, decoded_predictions))

        if args.n_best:
            n_best_predictions = []
            for i, (n_best_preds, n_best_scores, n_best_alphas) in enumerate(predictions['n_best']):
                n_best_sample_score = []
                for n_best_pred, n_best_score, n_best_alpha in zip(n_best_preds, n_best_scores, n_best_alphas):
                    pred = decode_predictions_beam_search([n_best_pred],
                                                          index2word_y,
                                                          glossary=glossary,
                                                          alphas=[n_best_alpha] if params_prediction['pos_unk'] else None,
                                                          x_text=[sources[i]] if params_prediction['pos_unk'] else None,
                                                          heuristic=heuristic,
                                                          mapping=mapping,
                                                          verbose=args.verbose)
                    # Apply detokenization function if needed
                    if params.get('APPLY_DETOKENIZATION', False):
                        pred = list(map(detokenize_function, pred))
                    n_best_sample_score.append([i, pred, n_best_score])
                n_best_predictions.append(n_best_sample_score)

        # Store result
        if args.dest is not None:
            filepath = args.dest  # results file
            if params.get('SAMPLING_SAVE_MODE', 'list') == 'list':
                list2file(filepath, decoded_predictions)
                if args.n_best:
                    nbest2file(filepath + '.nbest', n_best_predictions)
            else:
                raise Exception('Only "list" is allowed in "SAMPLING_SAVE_MODE"')
        else:
            list2stdout(decoded_predictions)
            if args.n_best:
                logger.info('Storing n-best sentences in ./' + s + '.nbest')
                nbest2file('./' + s + '.nbest', n_best_predictions)
    logger.info('Sampling finished')
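# Sketch (assumption, not taken from keras_wrapper): the length_penalty / coverage_penalty
# options above typically rescore a finished beam hypothesis in the GNMT style,
#   score(Y|X) = log P(Y|X) / lp(Y) + cp(X, Y),
# with lp(Y) = ((5 + |Y|) / 6) ** length_norm_factor and
# cp(X, Y) = coverage_norm_factor * sum_i log(min(sum_j a_ij, 1.0)).
# The exact formula used by BeamSearchEnsemble may differ; this only illustrates
# what the two *_norm_factor knobs control.
import math


def rescore_hypothesis(log_prob, length, attention_sums, length_norm_factor=0.6, coverage_norm_factor=0.2):
    lp = ((5.0 + length) / 6.0) ** length_norm_factor
    cp = coverage_norm_factor * sum(math.log(min(a, 1.0)) for a in attention_sums)
    return log_prob / lp + cp


# Toy usage: a 10-token hypothesis whose 12 source positions are all well attended.
print(rescore_hypothesis(-4.2, 10, [0.9] * 12))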
"-ch", "--changes", nargs="*", help="Changes to config, following the syntax Key=Value", default="") return parser.parse_args() if __name__ == "__main__": args = parse_args() # Update parameters if args.config is not None: logger.info('Reading parameters from %s.' % args.config) params = update_parameters({}, pkl2dict(args.config)) else: logger.info('Reading parameters from config.py.') params = load_parameters() logger.info('Starting active learning with arguments: %str' % str(args)) online_parameters = load_parameters_online() params = update_parameters(params, online_parameters) try: for arg in args.changes: try: k, v = arg.split('=') except ValueError: print 'Overwritten arguments must have the form key=Value. \n Currently are: %s' % str( args.changes) exit(1)
def main():
    args = parse_args()
    server_address = ('', args.port)
    httpd = BaseHTTPServer.HTTPServer(server_address, NMTHandler)
    if args.config is None:
        logging.info("Reading parameters from config.py")
        from config import load_parameters
        params = load_parameters()
    else:
        logging.info("Loading parameters from %s" % str(args.config))
        params = pkl2dict(args.config)
    try:
        for arg in args.changes:
            try:
                k, v = arg.split('=')
            except ValueError:
                print('Overwritten arguments must have the form key=Value. \n Currently are: %s' % str(args.changes))
                exit(1)
            try:
                params[k] = ast.literal_eval(v)
            except ValueError:
                params[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)

    dataset = loadDataset(args.dataset)

    # For converting predictions into sentences
    # Dataset backwards compatibility
    bpe_separator = dataset.BPE_separator if hasattr(dataset, "BPE_separator") and dataset.BPE_separator is not None else '@@'
    # Build BPE tokenizer if necessary
    if 'bpe' in params['TOKENIZATION_METHOD'].lower():
        logger.info('Building BPE')
        if not dataset.BPE_built:
            dataset.build_bpe(params.get('BPE_CODES_PATH', params['DATA_ROOT_PATH'] + '/training_codes.joint'),
                              bpe_separator)
    # Build tokenization function
    tokenize_f = eval('dataset.' + params.get('TOKENIZATION_METHOD', 'tokenize_none'))
    detokenize_function = eval('dataset.' + params.get('DETOKENIZATION_METHOD', 'detokenize_none'))
    dataset.build_moses_tokenizer(language=params['SRC_LAN'])
    dataset.build_moses_detokenizer(language=params['TRG_LAN'])
    tokenize_general = dataset.tokenize_moses
    detokenize_general = dataset.detokenize_moses

    params_prediction = dict()
    params_prediction['max_batch_size'] = params.get('BATCH_SIZE', 20)
    params_prediction['n_parallel_loaders'] = params.get('PARALLEL_LOADERS', 1)
    params_prediction['beam_size'] = params.get('BEAM_SIZE', 6)
    params_prediction['maxlen'] = params.get('MAX_OUTPUT_TEXT_LEN_TEST', 100)
    params_prediction['optimized_search'] = params['OPTIMIZED_SEARCH']
    params_prediction['model_inputs'] = params['INPUTS_IDS_MODEL']
    params_prediction['model_outputs'] = params['OUTPUTS_IDS_MODEL']
    params_prediction['dataset_inputs'] = params['INPUTS_IDS_DATASET']
    params_prediction['dataset_outputs'] = params['OUTPUTS_IDS_DATASET']
    params_prediction['search_pruning'] = params.get('SEARCH_PRUNING', False)
    params_prediction['normalize_probs'] = params.get('NORMALIZE_SAMPLING', False)
    params_prediction['alpha_factor'] = params.get('ALPHA_FACTOR', 1.0)
    params_prediction['coverage_penalty'] = params.get('COVERAGE_PENALTY', False)
    params_prediction['length_penalty'] = params.get('LENGTH_PENALTY', False)
    params_prediction['length_norm_factor'] = params.get('LENGTH_NORM_FACTOR', 0.0)
    params_prediction['coverage_norm_factor'] = params.get('COVERAGE_NORM_FACTOR', 0.0)
    params_prediction['pos_unk'] = params.get('POS_UNK', False)
    params_prediction['heuristic'] = params.get('HEURISTIC', 0)
    params_prediction['state_below_maxlen'] = -1 if params.get('PAD_ON_BATCH', True) \
        else params.get('MAX_OUTPUT_TEXT_LEN', 50)
    params_prediction['output_max_length_depending_on_x'] = params.get('MAXLEN_GIVEN_X', True)
    params_prediction['output_max_length_depending_on_x_factor'] = params.get('MAXLEN_GIVEN_X_FACTOR', 3)
    params_prediction['output_min_length_depending_on_x'] = params.get('MINLEN_GIVEN_X', True)
    params_prediction['output_min_length_depending_on_x_factor'] = params.get('MINLEN_GIVEN_X_FACTOR', 2)

    # Manage pos_unk strategies
    if params['POS_UNK']:
        mapping = None if dataset.mapping == dict() else dataset.mapping
    else:
        mapping = None

    if args.online:
        logging.info('Loading models from %s' % str(args.models))
        model_instances = [TranslationModel(params,
                                            model_type=params['MODEL_TYPE'],
                                            verbose=params['VERBOSE'],
                                            model_name=params['MODEL_NAME'] + '_' + str(i),
                                            vocabularies=dataset.vocabulary,
                                            store_path=params['STORE_PATH'],
                                            set_optimizer=False)
                           for i in range(len(args.models))]
        models = [updateModel(model, path, -1, full_path=True)
                  for (model, path) in zip(model_instances, args.models)]

        # Set additional inputs to models if using a custom loss function
        params['USE_CUSTOM_LOSS'] = True if 'PAS' in params['OPTIMIZER'] else False
        if params['N_BEST_OPTIMIZER']:
            logging.info('Using N-best optimizer')

        models = build_online_models(models, params)
        online_trainer = OnlineTrainer(models, dataset, None, None, params_training, verbose=args.verbose)
    else:
        models = [loadModel(m, -1, full_path=True) for m in args.models]

    params['INPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['INPUTS_IDS_DATASET'][0]]
    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]

    # Get word2index and index2word dictionaries
    index2word_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['words2idx']
    index2word_x = dataset.vocabulary[params['INPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_x = dataset.vocabulary[params['INPUTS_IDS_DATASET'][0]]['words2idx']

    excluded_words = None
    interactive_beam_searcher = NMTSampler(models, dataset, params_prediction,
                                           tokenize_f, detokenize_function, tokenize_general, detokenize_general,
                                           mapping=mapping,
                                           word2index_x=word2index_x,
                                           word2index_y=word2index_y,
                                           index2word_y=index2word_y,
                                           excluded_words=excluded_words,
                                           verbose=args.verbose)
    # Compile Theano sampling function by generating a fake sample
    # TODO: Find a better way of doing this
    print("Compiling sampler...")
    interactive_beam_searcher.generate_sample('i')

    httpd.sampler = interactive_beam_searcher

    print('Server starting at localhost:' + str(args.port))
    httpd.serve_forever()
def main():
    args = parse_args()
    server_address = (args.address, args.port)
    httpd = BaseHTTPServer.HTTPServer(server_address, NMTHandler)
    logger.setLevel(args.logging_level)
    if args.config is not None:
        logger.info('Reading parameters from %s.' % args.config)
        parameters = update_parameters({}, pkl2dict(args.config))
    else:
        logger.info('Reading parameters from config.py.')
        parameters = load_parameters()
    if args.online:
        online_parameters = load_parameters_online(parameters)
        parameters = update_parameters(parameters, online_parameters)
    try:
        for arg in args.changes:
            try:
                k, v = arg.split('=')
            except ValueError:
                print('Overwritten arguments must have the form key=Value. \n Currently are: %s' % str(args.changes))
                exit(1)
            try:
                parameters[k] = ast.literal_eval(v)
            except ValueError:
                parameters[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)

    check_params(parameters)
    if args.verbose:
        logging.info("parameters = " + str(parameters))

    dataset = loadDataset(args.dataset)

    # Dataset backwards compatibility
    bpe_separator = dataset.BPE_separator if hasattr(dataset, "BPE_separator") and dataset.BPE_separator is not None else '@@'
    # Build BPE tokenizer if necessary
    if 'bpe' in parameters['TOKENIZATION_METHOD'].lower():
        logger.info('Building BPE')
        if not dataset.BPE_built:
            dataset.build_bpe(parameters.get('BPE_CODES_PATH', parameters['DATA_ROOT_PATH'] + '/training_codes.joint'),
                              bpe_separator)
    # Build tokenization function
    tokenize_f = eval('dataset.' + parameters.get('TOKENIZATION_METHOD', 'tokenize_bpe'))
    detokenize_function = eval('dataset.' + parameters.get('DETOKENIZATION_METHOD', 'detokenize_bpe'))
    dataset.build_moses_tokenizer(language=parameters['TRG_LAN'])
    dataset.build_moses_detokenizer(language=parameters['TRG_LAN'])
    tokenize_general = dataset.tokenize_moses
    detokenize_general = dataset.detokenize_moses

    parameters_training = dict()
    if args.online:
        logging.info('Loading models from %s' % str(args.models))
        parameters_training = {  # Training parameters
            'n_epochs': parameters['MAX_EPOCH'],
            'shuffle': False,
            'loss': parameters.get('LOSS', 'categorical_crossentropy'),
            'batch_size': parameters.get('BATCH_SIZE', 1),
            'homogeneous_batches': False,
            'optimizer': parameters.get('OPTIMIZER', 'SGD'),
            'lr': parameters.get('LR', 0.1),
            'lr_decay': parameters.get('LR_DECAY', None),
            'lr_gamma': parameters.get('LR_GAMMA', 1.),
            'epochs_for_save': -1,
            'verbose': args.verbose,
            'eval_on_sets': parameters['EVAL_ON_SETS_KERAS'],
            'n_parallel_loaders': parameters['PARALLEL_LOADERS'],
            'extra_callbacks': [],  # callbacks,
            'reload_epoch': parameters['RELOAD'],
            'epoch_offset': parameters['RELOAD'],
            'data_augmentation': parameters['DATA_AUGMENTATION'],
            'patience': parameters.get('PATIENCE', 0),
            'metric_check': parameters.get('STOP_METRIC', None),
            'eval_on_epochs': parameters.get('EVAL_EACH_EPOCHS', True),
            'each_n_epochs': parameters.get('EVAL_EACH', 1),
            'start_eval_on_epoch': parameters.get('START_EVAL_ON_EPOCH', 0),
            'additional_training_settings': {
                'k': parameters.get('K', 1),
                'tau': parameters.get('TAU', 1),
                'lambda': parameters.get('LAMBDA', 0.5),
                'c': parameters.get('C', 0.5),
                'd': parameters.get('D', 0.5)
            }
        }
        # Load trainable model(s)
        logging.info('Loading models from %s' % str(args.models))
        model_instances = [Captioning_Model(parameters,
                                            model_type=parameters['MODEL_TYPE'],
                                            verbose=parameters['VERBOSE'],
                                            model_name=parameters['MODEL_NAME'] + '_' + str(i),
                                            vocabularies=dataset.vocabulary,
                                            store_path=parameters['STORE_PATH'],
                                            set_optimizer=False)
                           for i in range(len(args.models))]
        models = [updateModel(model, path, -1, full_path=True)
                  for (model, path) in zip(model_instances, args.models)]
        for model in models:
            model.setParams(parameters)
            model.setOptimizer()
    else:
        # Otherwise, load regular model(s)
        models = [loadModel(m, -1, full_path=True) for m in args.models]

    parameters['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[parameters['OUTPUTS_IDS_DATASET'][0]]

    # Get word2index and index2word dictionaries
    index2word_y = dataset.vocabulary[parameters['OUTPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_y = dataset.vocabulary[parameters['OUTPUTS_IDS_DATASET'][0]]['words2idx']
    unk_id = dataset.extra_words['<unk>']

    parameters_prediction = {
        'max_batch_size': parameters['BATCH_SIZE'],
        'n_parallel_loaders': parameters['PARALLEL_LOADERS'],
        'predict_on_sets': [args.split],
        'beam_size': parameters['BEAM_SIZE'],
        'maxlen': parameters['MAX_OUTPUT_TEXT_LEN_TEST'],
        'optimized_search': parameters['OPTIMIZED_SEARCH'],
        'model_inputs': parameters['INPUTS_IDS_MODEL'],
        'model_outputs': parameters['OUTPUTS_IDS_MODEL'],
        'dataset_inputs': parameters['INPUTS_IDS_DATASET'],
        'dataset_outputs': parameters['OUTPUTS_IDS_DATASET'],
        'normalize_probs': parameters['NORMALIZE_SAMPLING'],
        'alpha_factor': parameters['ALPHA_FACTOR'],
        'normalize': parameters.get('NORMALIZATION', False),
        'normalization_type': parameters.get('NORMALIZATION_TYPE', None),
        'data_augmentation': parameters.get('DATA_AUGMENTATION', False),
        'mean_substraction': parameters.get('MEAN_SUBTRACTION', False),
        'wo_da_patch_type': parameters.get('WO_DA_PATCH_TYPE', 'whole'),
        'da_patch_type': parameters.get('DA_PATCH_TYPE', 'resize_and_rndcrop'),
        'da_enhance_list': parameters.get('DA_ENHANCE_LIST', None),
        'pos_unk': parameters.get('POS_UNK', None),
        'heuristic': parameters.get('HEURISTIC', None),
        'search_pruning': parameters.get('SEARCH_PRUNING', False),
        'state_below_index': -1,
        'output_text_index': 0,
        'apply_tokenization': parameters.get('APPLY_TOKENIZATION', False),
        'tokenize_f': eval('dataset.' + parameters.get('TOKENIZATION_METHOD', 'tokenize_none')),
        'apply_detokenization': parameters.get('APPLY_DETOKENIZATION', True),
        'detokenize_f': eval('dataset.' + parameters.get('DETOKENIZATION_METHOD', 'detokenize_none')),
        'coverage_penalty': parameters.get('COVERAGE_PENALTY', False),
        'length_penalty': parameters.get('LENGTH_PENALTY', False),
        'length_norm_factor': parameters.get('LENGTH_NORM_FACTOR', 0.0),
        'coverage_norm_factor': parameters.get('COVERAGE_NORM_FACTOR', 0.0),
        'output_max_length_depending_on_x': parameters.get('MAXLEN_GIVEN_X', False),
        'output_max_length_depending_on_x_factor': parameters.get('MAXLEN_GIVEN_X_FACTOR', 3),
        'output_min_length_depending_on_x': parameters.get('MINLEN_GIVEN_X', False),
        'output_min_length_depending_on_x_factor': parameters.get('MINLEN_GIVEN_X_FACTOR', 2),
        'attend_on_output': parameters.get('ATTEND_ON_OUTPUT', 'transformer' in parameters['MODEL_TYPE'].lower()),
        'n_best_optimizer': parameters.get('N_BEST_OPTIMIZER', False)
    }

    excluded_words = None
    interactive_beam_searcher = VideoDescSampler(models, dataset, parameters, parameters_prediction, parameters_training,
                                                 tokenize_f, detokenize_function, tokenize_general, detokenize_general,
                                                 split=args.split,
                                                 word2index_y=word2index_y,
                                                 index2word_y=index2word_y,
                                                 eos_symbol=args.eos_symbol,
                                                 excluded_words=excluded_words,
                                                 unk_id=unk_id,
                                                 online=args.online,
                                                 verbose=args.verbose)

    httpd.sampler = interactive_beam_searcher

    logger.info('Server starting at %s' % str(server_address))
    httpd.serve_forever()
def interactive_simulation():
    args = parse_args()
    # Update parameters
    if args.config is not None:
        logger.info('Reading parameters from %s.' % args.config)
        params = update_parameters({}, pkl2dict(args.config))
    else:
        logger.info('Reading parameters from config.py.')
        params = load_parameters()

    if args.online:
        from config_online import load_parameters as load_parameters_online
        online_parameters = load_parameters_online(params)
        params = update_parameters(params, online_parameters)
    try:
        for arg in args.changes:
            try:
                k, v = arg.split('=')
            except ValueError:
                print('Overwritten arguments must have the form key=Value. \n Currently are: %s' % str(args.changes))
                exit(1)
            try:
                params[k] = ast.literal_eval(v)
            except ValueError:
                params[k] = v
    except ValueError:
        print('Error processing arguments: (', k, ",", v, ")")
        exit(2)

    check_params(params)
    if args.verbose:
        logging.info("params = " + str(params))

    dataset = loadDataset(args.dataset)
    # dataset = update_dataset_from_file(dataset, args.source, params, splits=args.splits, remove_outputs=True)

    # Dataset backwards compatibility
    bpe_separator = dataset.BPE_separator if hasattr(dataset, "BPE_separator") and dataset.BPE_separator is not None else u'@@'
    # Set tokenization method
    params['TOKENIZATION_METHOD'] = 'tokenize_bpe' if args.tokenize_bpe else params.get('TOKENIZATION_METHOD', 'tokenize_none')
    # Build BPE tokenizer if necessary
    if 'bpe' in params['TOKENIZATION_METHOD'].lower():
        logger.info('Building BPE')
        if not dataset.BPE_built:
            dataset.build_bpe(params.get('BPE_CODES_PATH', params['DATA_ROOT_PATH'] + '/training_codes.joint'),
                              separator=bpe_separator)
    # Build tokenization function
    tokenize_f = eval('dataset.' + params.get('TOKENIZATION_METHOD', 'tokenize_none'))

    if args.online:
        # Training params
        params_training = {  # Training params
            'n_epochs': params['MAX_EPOCH'],
            'shuffle': False,
            'loss': params.get('LOSS', 'categorical_crossentropy'),
            'batch_size': params.get('BATCH_SIZE', 1),
            'homogeneous_batches': False,
            'optimizer': params.get('OPTIMIZER', 'SGD'),
            'lr': params.get('LR', 0.1),
            'lr_decay': params.get('LR_DECAY', None),
            'lr_gamma': params.get('LR_GAMMA', 1.),
            'epochs_for_save': -1,
            'verbose': args.verbose,
            'eval_on_sets': params['EVAL_ON_SETS_KERAS'],
            'n_parallel_loaders': params['PARALLEL_LOADERS'],
            'extra_callbacks': [],  # callbacks,
            'reload_epoch': 0,
            'epoch_offset': 0,
            'data_augmentation': params['DATA_AUGMENTATION'],
            'patience': params.get('PATIENCE', 0),
            'metric_check': params.get('STOP_METRIC', None),
            'eval_on_epochs': params.get('EVAL_EACH_EPOCHS', True),
            'each_n_epochs': params.get('EVAL_EACH', 1),
            'start_eval_on_epoch': params.get('START_EVAL_ON_EPOCH', 0),
            'additional_training_settings': {
                'k': params.get('K', 1),
                'tau': params.get('TAU', 1),
                'lambda': params.get('LAMBDA', 0.5),
                'c': params.get('C', 0.5),
                'd': params.get('D', 0.5)
            }
        }
    else:
        params_training = dict()

    params['OUTPUT_VOCABULARY_SIZE'] = dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]]
    logger.info("<<< Using an ensemble of %d models >>>" % len(args.models))
    if args.online:
        # Load trainable model(s)
        logging.info('Loading models from %s' % str(args.models))
        model_instances = [Captioning_Model(params,
                                            model_type=params['MODEL_TYPE'],
                                            verbose=params['VERBOSE'],
                                            model_name=params['MODEL_NAME'] + '_' + str(i),
                                            vocabularies=dataset.vocabulary,
                                            store_path=params['STORE_PATH'],
                                            clear_dirs=False,
                                            set_optimizer=False)
                           for i in range(len(args.models))]
        models = [updateModel(model, path, -1, full_path=True)
                  for (model, path) in zip(model_instances, args.models)]

        # Set additional inputs to models if using a custom loss function
        params['USE_CUSTOM_LOSS'] = True if 'PAS' in params['OPTIMIZER'] else False
        if params['N_BEST_OPTIMIZER']:
            logging.info('Using N-best optimizer')

        models = build_online_models(models, params)
        online_trainer = OnlineTrainer(models, dataset, None, None, params_training, verbose=args.verbose)
    else:
        # Otherwise, load regular model(s)
        models = [loadModel(m, -1, full_path=True) for m in args.models]

    # Load text files
    logger.info("<<< Storing corrected hypotheses into: %s >>>" % str(args.dest))
    ftrans = open(args.dest, 'w')
    ftrans.close()

    # Do we want to save the original sentences?
    if args.original_dest is not None:
        logger.info("<<< Storing original hypotheses into: %s >>>" % str(args.original_dest))
        ftrans_ori = open(args.original_dest, 'w')
        ftrans_ori.close()

    if args.references is not None:
        ftrg = codecs.open(args.references, 'r', encoding='utf-8')  # File with post-edited (or reference) sentences.
        all_references = ftrg.read().split('\n')
        if all_references[-1] == u'':
            all_references = all_references[:-1]

    # Get word2index and index2word dictionaries
    index2word_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['idx2words']
    word2index_y = dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]]['words2idx']
    unk_id = dataset.extra_words['<unk>']

    # Initialize counters
    total_errors = 0
    total_words = 0
    total_chars = 0
    total_mouse_actions = 0
    try:
        for s in args.splits:
            # Apply model predictions
            params_prediction = {
                'max_batch_size': params['BATCH_SIZE'],
                'n_parallel_loaders': params['PARALLEL_LOADERS'],
                'predict_on_sets': [s],
                'beam_size': params['BEAM_SIZE'],
                'maxlen': params['MAX_OUTPUT_TEXT_LEN_TEST'],
                'optimized_search': params['OPTIMIZED_SEARCH'],
                'model_inputs': params['INPUTS_IDS_MODEL'],
                'model_outputs': params['OUTPUTS_IDS_MODEL'],
                'dataset_inputs': params['INPUTS_IDS_DATASET'],
                'dataset_outputs': params['OUTPUTS_IDS_DATASET'],
                'normalize_probs': params.get('NORMALIZE_SAMPLING', False),
                'alpha_factor': params.get('ALPHA_FACTOR', 1.0),
                'normalize': params.get('NORMALIZATION', False),
                'normalization_type': params.get('NORMALIZATION_TYPE', None),
                'data_augmentation': params.get('DATA_AUGMENTATION', False),
                'mean_substraction': params.get('MEAN_SUBTRACTION', False),
                'wo_da_patch_type': params.get('WO_DA_PATCH_TYPE', 'whole'),
                'da_patch_type': params.get('DA_PATCH_TYPE', 'resize_and_rndcrop'),
                'da_enhance_list': params.get('DA_ENHANCE_LIST', None),
                'heuristic': params.get('HEURISTIC', None),
                'search_pruning': params.get('SEARCH_PRUNING', False),
                'state_below_index': -1,
                'output_text_index': 0,
                'apply_tokenization': params.get('APPLY_TOKENIZATION', False),
                'tokenize_f': eval('dataset.' + params.get('TOKENIZATION_METHOD', 'tokenize_none')),
                'apply_detokenization': params.get('APPLY_DETOKENIZATION', True),
                'detokenize_f': eval('dataset.' + params.get('DETOKENIZATION_METHOD', 'detokenize_none')),
                'coverage_penalty': params.get('COVERAGE_PENALTY', False),
                'length_penalty': params.get('LENGTH_PENALTY', False),
                'length_norm_factor': params.get('LENGTH_NORM_FACTOR', 0.0),
                'coverage_norm_factor': params.get('COVERAGE_NORM_FACTOR', 0.0),
                'pos_unk': False,
                'state_below_maxlen': -1 if params.get('PAD_ON_BATCH', True) else params.get('MAX_OUTPUT_TEXT_LEN_TEST', 50),
                'output_max_length_depending_on_x': params.get('MAXLEN_GIVEN_X', False),
                'output_max_length_depending_on_x_factor': params.get('MAXLEN_GIVEN_X_FACTOR', 3),
                'output_min_length_depending_on_x': params.get('MINLEN_GIVEN_X', False),
                'output_min_length_depending_on_x_factor': params.get('MINLEN_GIVEN_X_FACTOR', 2),
                'attend_on_output': params.get('ATTEND_ON_OUTPUT', 'transformer' in params['MODEL_TYPE'].lower()),
                'n_best_optimizer': params.get('N_BEST_OPTIMIZER', False)
            }

            # Build interactive sampler
            interactive_beam_searcher = InteractiveBeamSearchSampler(models, dataset, params_prediction,
                                                                     excluded_words=None, verbose=args.verbose)
            start_time = time.time()

            if args.verbose:
                logging.info("Params prediction = " + str(params_prediction))
                if args.online:
                    logging.info("Params training = " + str(params_training))

            n_samples = getattr(dataset, 'len_' + s)
            if args.references is None:
                all_references = dataset.extra_variables[s][params['OUTPUTS_IDS_DATASET'][0]]

            # Start to translate the source file interactively
            for n_sample in range(n_samples):
                errors_sentence = 0
                mouse_actions_sentence = 0
                hypothesis_number = 0
                # Load data from dataset
                current_input = dataset.getX_FromIndices(s, [n_sample],
                                                         normalization_type=params_prediction.get('normalization_type'),
                                                         normalization=params_prediction.get('normalize', False),
                                                         dataAugmentation=params_prediction.get('data_augmentation', False),
                                                         wo_da_patch_type=params_prediction.get('wo_da_patch_type', 'whole'),
                                                         da_patch_type=params_prediction.get('da_patch_type', 'resize_and_rndcrop'),
                                                         da_enhance_list=params_prediction.get('da_enhance_list', None))[0][0]

                # Load references
                references = all_references[n_sample]
                tokenized_references = list(map(tokenize_f, references)) if args.tokenize_references else references

                # Get reference as desired by the user, i.e. detokenized if necessary
                reference = list(map(params_prediction['detokenize_f'], tokenized_references)) if \
                    args.detokenize_bpe else tokenized_references

                # Detokenize line for nicer logging :)
                logger.debug(u'\n\nProcessing sample %d' % (n_sample + 1))
                logger.debug(u'Target: %s' % reference)

                # 1. Get a first hypothesis
                trans_indices, costs, alphas = interactive_beam_searcher.sample_beam_search_interactive(current_input)

                # 1.2 Decode hypothesis
                hypothesis = decode_predictions_beam_search([trans_indices], index2word_y, pad_sequences=True, verbose=0)[0]
                # 1.3 Store result (optional)
                hypothesis = params_prediction['detokenize_f'](hypothesis) \
                    if params_prediction.get('apply_detokenization', False) else hypothesis
                if args.original_dest is not None:
                    if params['SAMPLING_SAVE_MODE'] == 'list':
                        list2file(args.original_dest, [hypothesis], permission='a')
                    else:
                        raise Exception('Only "list" is allowed in "SAMPLING_SAVE_MODE"')
                logger.debug(u'Hypo_%d: %s' % (hypothesis_number, hypothesis))

                # 2.0 Interactive translation
                if hypothesis in tokenized_references:
                    # 2.1 If the sentence is correct, we validate it
                    pass
                else:
                    # 2.2 Wrong hypothesis -> Interactively translate the sentence
                    correct_hypothesis = False
                    last_correct_pos = 0
                    while not correct_hypothesis:
                        # 2.2.1 Empty data structures for the next sentence
                        fixed_words_user = OrderedDict()
                        unk_words_dict = OrderedDict()
                        isle_indices = []
                        unks_in_isles = []

                        if args.prefix:
                            # 2.2.2 Compute longest common character prefix (LCCP)
                            reference_idx, next_correction_pos, validated_prefix = common_prefixes(hypothesis, tokenized_references)
                        else:
                            # 2.2.2 Compute common character segments  # TODO
                            next_correction_pos, validated_prefix, validated_segments = common_segments(hypothesis, reference)
                        reference = tokenized_references[reference_idx]
                        if next_correction_pos == len(reference):
                            correct_hypothesis = True
                            break
                        # 2.2.3 Get next correction by checking against the reference
                        next_correction = reference[next_correction_pos]

                        # 2.2.4 Tokenize the prefix properly (possibly applying BPE)
                        tokenized_validated_prefix = tokenize_f(validated_prefix + next_correction)

                        # 2.2.5 Validate words
                        for pos, word in enumerate(tokenized_validated_prefix.split()):
                            fixed_words_user[pos] = word2index_y.get(word, unk_id)
                            if word2index_y.get(word) is None:
                                unk_words_dict[pos] = word

                        # 2.2.6 Constrain search for the last word
                        last_user_word_pos = list(fixed_words_user.keys())[-1]
                        if next_correction != u' ':
                            last_user_word = tokenized_validated_prefix.split()[-1]
                            filtered_idx2word = dict((word2index_y[candidate_word], candidate_word)
                                                     for candidate_word in word2index_y
                                                     if candidate_word[:len(last_user_word)] == last_user_word)
                            if filtered_idx2word != dict():
                                del fixed_words_user[last_user_word_pos]
                                if last_user_word_pos in unk_words_dict.keys():
                                    del unk_words_dict[last_user_word_pos]
                        else:
                            filtered_idx2word = dict()

                        logger.debug(u'"%s" to character %d.' % (next_correction, next_correction_pos))

                        # 2.2.7 Generate a hypothesis compatible with the feedback provided by the user
                        hypothesis = generate_constrained_hypothesis(interactive_beam_searcher, current_input,
                                                                     fixed_words_user, params_prediction, args,
                                                                     isle_indices, filtered_idx2word, index2word_y,
                                                                     None, None, None,
                                                                     unk_words_dict.keys(), unk_words_dict.values(),
                                                                     unks_in_isles)
                        hypothesis_number += 1
                        hypothesis = u' '.join(hypothesis)  # Hypothesis is unicode
                        hypothesis = params_prediction['detokenize_f'](hypothesis) \
                            if args.detokenize_bpe else hypothesis
                        logger.debug(u'Target: %s' % reference)
                        logger.debug(u"Hypo_%d: %s" % (hypothesis_number, hypothesis))
                        # 2.2.8 Add a keystroke
                        errors_sentence += 1
                        # 2.2.9 Add a mouse action if we moved the pointer
                        if next_correction_pos - last_correct_pos > 1:
                            mouse_actions_sentence += 1
                        last_correct_pos = next_correction_pos

                    # 2.3 Final check: The reference is a subset of the hypothesis: Cut the hypothesis
                    if len(reference) < len(hypothesis):
                        hypothesis = hypothesis[:len(reference)]
                        errors_sentence += 1
                        logger.debug(u"Cutting hypothesis")

                # 2.4 Security assertion
                assert hypothesis in references, "Error: The final hypothesis does not match with the reference! \n" \
                                                 "\t Split: %s \n" \
                                                 "\t Sentence: %d \n" \
                                                 "\t Hypothesis: %s\n" \
                                                 "\t Reference: %s" % (s, n_sample + 1, hypothesis, reference)

                # 3. Update user effort counters
                mouse_actions_sentence += 1  # This +1 is the validation action
                chars_sentence = len(hypothesis)
                total_errors += errors_sentence
                total_words += len(hypothesis.split())
                total_chars += chars_sentence
                total_mouse_actions += mouse_actions_sentence

                # 3.1 Log some info
                logger.debug(u"Final hypothesis: %s" % hypothesis)
                logger.debug(u"%d errors. "
                             u"Sentence WSR: %4f. "
                             u"Sentence mouse strokes: %d "
                             u"Sentence MAR: %4f. "
                             u"Sentence MAR_c: %4f. "
                             u"Sentence KSMR: %4f. "
                             u"Accumulated (should only be considered for debugging purposes!) "
                             u"WSR: %4f. "
                             u"MAR: %4f. "
                             u"MAR_c: %4f. "
                             u"KSMR: %4f.\n\n\n\n" %
                             (errors_sentence,
                              float(errors_sentence) / len(hypothesis),
                              mouse_actions_sentence,
                              float(mouse_actions_sentence) / len(hypothesis),
                              float(mouse_actions_sentence) / chars_sentence,
                              float(errors_sentence + mouse_actions_sentence) / chars_sentence,
                              float(total_errors) / total_words,
                              float(total_mouse_actions) / total_words,
                              float(total_mouse_actions) / total_chars,
                              float(total_errors + total_mouse_actions) / total_chars))

                # 4. If we are performing OL after each correct sample:
                if args.online:
                    # 4.1 Compute model inputs
                    # 4.1.1 Source text -> Already computed (used for the INMT process)
                    # 4.1.2 State below
                    state_below = dataset.loadText([reference],
                                                   vocabularies=dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]],
                                                   max_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                                                   offset=1,
                                                   fill=dataset.fill_text[params['INPUTS_IDS_DATASET'][-1]],
                                                   pad_on_batch=dataset.pad_on_batch[params['INPUTS_IDS_DATASET'][-1]],
                                                   words_so_far=False,
                                                   loading_X=True)[0]
                    # 4.1.3 Ground truth sample -> Interactively translated sentence
                    trg_seq = dataset.loadTextOneHot([reference],
                                                     vocabularies=dataset.vocabulary[params['OUTPUTS_IDS_DATASET'][0]],
                                                     vocabulary_len=dataset.vocabulary_len[params['OUTPUTS_IDS_DATASET'][0]],
                                                     max_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                                                     offset=0,
                                                     fill=dataset.fill_text[params['OUTPUTS_IDS_DATASET'][0]],
                                                     pad_on_batch=dataset.pad_on_batch[params['OUTPUTS_IDS_DATASET'][0]],
                                                     words_so_far=False,
                                                     sample_weights=params['SAMPLE_WEIGHTS'],
                                                     loading_X=False)
                    # 4.2 Train online!
                    online_trainer.train_online([np.asarray([current_input]), state_below], trg_seq,
                                                trg_words=[reference])

                # 5. Write correct sentences into a file
                list2file(args.dest, [hypothesis], permission='a')

                if (n_sample + 1) % 50 == 0:
                    logger.info(u"%d sentences processed" % (n_sample + 1))
                    logger.info(u"Current speed is {} per sentence".format((time.time() - start_time) / (n_sample + 1)))
                    logger.info(u"Current WSR is: %f" % (float(total_errors) / total_words))
                    logger.info(u"Current MAR is: %f" % (float(total_mouse_actions) / total_words))
                    logger.info(u"Current MAR_c is: %f" % (float(total_mouse_actions) / total_chars))
                    logger.info(u"Current KSMR is: %f" % (float(total_errors + total_mouse_actions) / total_chars))

        # 6. Final!
        # 6.1 Log some information
        print(u"Total number of errors:", total_errors)
        print(u"Total number selections", total_mouse_actions)
        print(u"WSR: %f" % (float(total_errors) / total_words))
        print(u"MAR: %f" % (float(total_mouse_actions) / total_words))
        print(u"MAR_c: %f" % (float(total_mouse_actions) / total_chars))
        print(u"KSMR: %f" % (float(total_errors + total_mouse_actions) / total_chars))

    except KeyboardInterrupt:
        print(u'Interrupted!')
        print(u"Total number of corrections (up to now):", total_errors)
        print(u"WSR: %f" % (float(total_errors) / total_words))
        print(u"MAR: %f" % (float(total_mouse_actions) / total_words))
        print(u"MAR_c: %f" % (float(total_mouse_actions) / total_chars))
        print(u"KSMR: %f" % (float(total_errors + total_mouse_actions) / total_chars))
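# Small self-contained helper (not part of the original script) that mirrors the effort
# metrics printed above: WSR = word stroke ratio, MAR = mouse action ratio over words,
# MAR_c = mouse action ratio over characters, KSMR = keystroke + mouse action ratio.
def effort_metrics(total_errors, total_mouse_actions, total_words, total_chars):
    return {
        'WSR': float(total_errors) / total_words,
        'MAR': float(total_mouse_actions) / total_words,
        'MAR_c': float(total_mouse_actions) / total_chars,
        'KSMR': float(total_errors + total_mouse_actions) / total_chars,
    }


# Toy usage with made-up counters.
print(effort_metrics(total_errors=120, total_mouse_actions=310, total_words=1500, total_chars=8200))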
def build_dataset(params):
    if params['REBUILD_DATASET']:  # We build a new dataset instance
        if params['VERBOSE'] > 0:
            silence = False
            logging.info('Building ' + params['DATASET_NAME'] + ' dataset')
        else:
            silence = True

        base_path = params['DATA_ROOT_PATH']
        name = params['DATASET_NAME']
        ds = Dataset(name, base_path, silence=silence)

        if '-vidtext-embed' not in params['DATASET_NAME']:
            # OUTPUT DATA
            # Let's load the train, val and test splits of the descriptions (outputs)
            # the files include a description per line. In this dataset a variable number
            # of descriptions per video are provided.
            ds.setOutput(base_path + '/' + params['DESCRIPTION_FILES']['train'],
                         'train',
                         type='text',
                         id=params['OUTPUTS_IDS_DATASET'][0],
                         build_vocabulary=True,
                         tokenization=params['TOKENIZATION_METHOD'],
                         fill=params['FILL'],
                         pad_on_batch=True,
                         max_text_len=params['MAX_OUTPUT_TEXT_LEN'],
                         sample_weights=params['SAMPLE_WEIGHTS'],
                         min_occ=params['MIN_OCCURRENCES_VOCAB'])
            ds.setOutput(base_path + '/' + params['DESCRIPTION_FILES']['val'],
                         'val',
                         type='text',
                         id=params['OUTPUTS_IDS_DATASET'][0],
                         build_vocabulary=True,
                         pad_on_batch=True,
                         tokenization=params['TOKENIZATION_METHOD'],
                         sample_weights=params['SAMPLE_WEIGHTS'],
                         max_text_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                         min_occ=params['MIN_OCCURRENCES_VOCAB'])
            ds.setOutput(base_path + '/' + params['DESCRIPTION_FILES']['test'],
                         'test',
                         type='text',
                         id=params['OUTPUTS_IDS_DATASET'][0],
                         build_vocabulary=True,
                         pad_on_batch=True,
                         tokenization=params['TOKENIZATION_METHOD'],
                         sample_weights=params['SAMPLE_WEIGHTS'],
                         max_text_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                         min_occ=params['MIN_OCCURRENCES_VOCAB'])
        else:
            # Use descriptions as inputs instead --> 'matching'/'non-matching' as output
            ds.setInput(base_path + '/' + params['DESCRIPTION_FILES']['train'],
                        'train',
                        type='text',
                        id=params['INPUTS_IDS_DATASET'][1],
                        build_vocabulary=True,
                        tokenization=params['TOKENIZATION_METHOD'],
                        fill=params['FILL'],
                        pad_on_batch=True,
                        max_text_len=params['MAX_OUTPUT_TEXT_LEN'],
                        min_occ=params['MIN_OCCURRENCES_VOCAB'])
            ds.setInput(base_path + '/' + params['DESCRIPTION_FILES']['val'],
                        'val',
                        type='text',
                        id=params['INPUTS_IDS_DATASET'][1],
                        build_vocabulary=True,
                        pad_on_batch=True,
                        tokenization=params['TOKENIZATION_METHOD'],
                        max_text_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                        min_occ=params['MIN_OCCURRENCES_VOCAB'])
            ds.setInput(base_path + '/' + params['DESCRIPTION_FILES']['test'],
                        'test',
                        type='text',
                        id=params['INPUTS_IDS_DATASET'][1],
                        build_vocabulary=True,
                        pad_on_batch=True,
                        tokenization=params['TOKENIZATION_METHOD'],
                        max_text_len=params['MAX_OUTPUT_TEXT_LEN_TEST'],
                        min_occ=params['MIN_OCCURRENCES_VOCAB'])

        # INPUT DATA
        # Let's load the associated videos (inputs)
        # we must take into account that in this dataset we have a different number of sentences per video,
        # for this reason we introduce the parameter 'repeat_set'=num_captions, where num_captions is a list
        # containing the number of captions in each video.
        num_captions_train = np.load(base_path + '/' + params['DESCRIPTION_COUNTS_FILES']['train'])
        num_captions_val = np.load(base_path + '/' + params['DESCRIPTION_COUNTS_FILES']['val'])
        num_captions_test = np.load(base_path + '/' + params['DESCRIPTION_COUNTS_FILES']['test'])

        for feat_type in params['FEATURE_NAMES']:
            for split, num_cap in zip(['train', 'val', 'test'],
                                      [num_captions_train, num_captions_val, num_captions_test]):
                list_files = base_path + '/' + params['FRAMES_LIST_FILES'][split] % feat_type
                counts_files = base_path + '/' + params['FRAMES_COUNTS_FILES'][split] % feat_type
                ds.setInput([list_files, counts_files],
                            split,
                            type=params['INPUT_DATA_TYPE'],
                            id=params['INPUTS_IDS_DATASET'][0],
                            repeat_set=num_cap,
                            max_video_len=params['NUM_FRAMES'],
                            feat_len=params['IMG_FEAT_SIZE'],
                            data_augmentation_types=params['DATA_AUGMENTATION_TYPE'])

        if '-vidtext-embed' not in params['DATASET_NAME'] and len(params['INPUTS_IDS_DATASET']) > 1:
            ds.setInput(base_path + '/' + params['DESCRIPTION_FILES']['train'],
                        'train',
                        type='text',
                        id=params['INPUTS_IDS_DATASET'][1],
                        required=False,
                        tokenization=params['TOKENIZATION_METHOD'],
                        pad_on_batch=True,
                        build_vocabulary=params['OUTPUTS_IDS_DATASET'][0],
                        offset=1,
                        fill=params['FILL'],
                        max_text_len=params['MAX_OUTPUT_TEXT_LEN'],
                        max_words=params['OUTPUT_VOCABULARY_SIZE'],
                        min_occ=params['MIN_OCCURRENCES_VOCAB'])
            ds.setInput(None, 'val', type='ghost', id=params['INPUTS_IDS_DATASET'][1], required=False)
            ds.setInput(None, 'test', type='ghost', id=params['INPUTS_IDS_DATASET'][1], required=False)

        # Set inputs for temporally-linked samples
        if '-vidtext-embed' not in params['DATASET_NAME'] and '-linked' in params['DATASET_NAME']:
            # Set input captions from previous event/video
            if '-upperbound' not in params['DATASET_NAME']:
                if '-vidtext' in params['DATASET_NAME']:  # use both previous video and previous description
                    ds, repeat_images = insertTemporallyLinkedCaptionsVidText(ds, params,
                                                                              vidtext_set_names={'video': ['train', 'val', 'test'],
                                                                                                 'text': ['train']})
                    del repeat_images['test']
                    del repeat_images['val']
                    # Insert empty prev_descriptions on val and test sets
                    ds.setInput([],
                                'val',
                                type='text',
                                id=params['INPUTS_IDS_DATASET'][2],
                                build_vocabulary=params['OUTPUTS_IDS_DATASET'][0],
                                tokenization=params['TOKENIZATION_METHOD'],
                                fill=params['FILL'],
                                pad_on_batch=True,
                                max_text_len=params['MAX_OUTPUT_TEXT_LEN'],
                                min_occ=params['MIN_OCCURRENCES_VOCAB'],
                                required=False,
                                overwrite_split=True)
                    ds.setInput([],
                                'test',
                                type='text',
                                id=params['INPUTS_IDS_DATASET'][2],
                                build_vocabulary=params['OUTPUTS_IDS_DATASET'][0],
                                tokenization=params['TOKENIZATION_METHOD'],
                                fill=params['FILL'],
                                pad_on_batch=True,
                                max_text_len=params['MAX_OUTPUT_TEXT_LEN'],
                                min_occ=params['MIN_OCCURRENCES_VOCAB'],
                                required=False,
                                overwrite_split=True)
                elif '-video' in params['DATASET_NAME']:
                    ds, repeat_images = insertTemporallyLinkedCaptions(ds, params,
                                                                       set_names=['train', 'val', 'test'],
                                                                       video=True)
                    num_captions_val = repeat_images['val']
                    num_captions_test = repeat_images['test']
                else:
                    ds, repeat_images = insertTemporallyLinkedCaptions(ds, params)
                    # Insert empty prev_descriptions on val and test sets
                    ds.setInput([],
                                'val',
                                type='text',
                                id=params['INPUTS_IDS_DATASET'][2],
                                build_vocabulary=params['OUTPUTS_IDS_DATASET'][0],
                                tokenization=params['TOKENIZATION_METHOD'],
                                fill=params['FILL'],
                                pad_on_batch=True,
                                max_text_len=params['MAX_OUTPUT_TEXT_LEN'],
                                min_occ=params['MIN_OCCURRENCES_VOCAB'],
                                required=False,
                                overwrite_split=True)
                    ds.setInput([],
                                'test',
                                type='text',
                                id=params['INPUTS_IDS_DATASET'][2],
                                build_vocabulary=params['OUTPUTS_IDS_DATASET'][0],
                                tokenization=params['TOKENIZATION_METHOD'],
                                fill=params['FILL'],
                                pad_on_batch=True,
                                max_text_len=params['MAX_OUTPUT_TEXT_LEN'],
                                min_occ=params['MIN_OCCURRENCES_VOCAB'],
                                required=False,
                                overwrite_split=True)
            else:
                ds, repeat_images = insertTemporallyLinkedCaptions(ds, params,
                                                                   set_names=['train', 'val', 'test'],
                                                                   upperbound=True,
                                                                   video='-video' in params['DATASET_NAME'],
                                                                   copy='-copy' in params['DATASET_NAME'],
                                                                   force_nocopy='-nocopy' in params['DATASET_NAME'],
                                                                   prev='-prev' in params['DATASET_NAME'])
                num_captions_val = repeat_images['val']
                num_captions_test = repeat_images['test']

        if '-vidtext-embed' not in params['DATASET_NAME']:
            # Process dataset for keeping only one caption per video and storing the rest in a dict() with the following format:
            # ds.extra_variables[set_name][id_output][img_position] = [cap1, cap2, cap3, ..., capN]
            keep_n_captions(ds, repeat=[num_captions_val, num_captions_test], n=1, set_names=['val', 'test'])
        else:
            # Set outputs for -vidtext-embed model
            insertVidTextEmbedNegativeSamples(ds, params,
                                              repeat=[num_captions_train, num_captions_val, num_captions_test])

        if '-vidtext-embed' not in params['DATASET_NAME'] and \
                '-linked' in params['DATASET_NAME'] and \
                '-upperbound' not in params['DATASET_NAME'] and \
                '-video' not in params['DATASET_NAME']:
            # Set previous data indices
            for s, file in params['LINK_SAMPLE_FILES'].items():
                if s in repeat_images:
                    rep = repeat_images[s]
                else:
                    rep = 1
                ds.setInput(base_path + '/' + file,
                            s,
                            type='id',
                            id=params['INPUTS_IDS_DATASET'][-1],
                            repeat_set=rep)

        # We have finished loading the dataset, now we can store it for using it in the future
        saveDataset(ds, params['DATASET_STORE_PATH'])
    else:
        # We can easily recover it with a single line
        ds = loadDataset(params['DATASET_STORE_PATH'] + '/Dataset_' + params['DATASET_NAME'] + '.pkl')

        # Load vocabulary-related parameters of dataset used for pre-training
        if params['PRE_TRAINED_DATASET_NAME'] is not None:
            logging.info('Re-using previous dataset vocabulary ' + params['PRE_TRAINED_DATASET_NAME'])
            dataset_pretrained = loadDataset(params['DATASET_STORE_PATH'] + 'Dataset_' + params['PRE_TRAINED_DATASET_NAME'] + '.pkl')
            for id_new, id_old in params['VOCABULARIES_MAPPING'].items():
                ds.vocabulary[id_new] = copy.deepcopy(dataset_pretrained.vocabulary[id_old])
                ds.vocabulary_len[id_new] = copy.deepcopy(dataset_pretrained.vocabulary_len[id_old])
        elif params['PRE_TRAINED_VOCABULARY_NAME'] is not None:
            logging.info('Re-using previous vocabulary ' + params['PRE_TRAINED_VOCABULARY_NAME'])
            dataset_pretrained_vocabulary = pkl2dict(params['DATASET_STORE_PATH'] + params['PRE_TRAINED_VOCABULARY_NAME'] + '.pkl')
            for id_new, id_old in params['VOCABULARIES_MAPPING'].items():
                ds.vocabulary[id_new] = copy.deepcopy(dataset_pretrained_vocabulary[id_old])
                ds.vocabulary_len[id_new] = len(dataset_pretrained_vocabulary[id_old]['idx2words'])

    return ds
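# Illustrative sketch (not from the original code) of what repeat_set does conceptually:
# each video-level feature row is repeated once per caption so that videos and captions
# align one-to-one. The toy arrays below are made up.
import numpy as np

video_features = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])  # 3 videos
num_captions = np.array([2, 1, 3])                               # captions per video

aligned_features = np.repeat(video_features, num_captions, axis=0)
print(aligned_features.shape)  # (6, 2): one feature row per (video, caption) pair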