    default_value=False, help_str="Skip training and evaluate only"),
    Option("eval_metrics", default_value="bleu",
           help_str="Comma-separated list of evaluation metrics (bleu/wer/cer)"),
    Option("run_for_epochs", int, help_str="How many epochs to run each test for"),
  ]
  config_parser.add_task("experiment", experiment_options)

  # Print the auto-generated options documentation and exit without running anything
  if args.generate_doc:
    print(config_parser.generate_options_table())
    exit(0)

  # Seed the Python and NumPy RNGs for reproducibility
  if args.dynet_seed:
    random.seed(args.dynet_seed)
    np.random.seed(args.dynet_seed)

  config = config_parser.args_from_config_file(args.experiments_file)

  results = []

  # Check ahead of time that all experiments exist, to avoid bad surprises
  experiment_names = args.experiment_name or config.keys()
  if args.experiment_name:
    nonexistent = set(experiment_names).difference(config.keys())