# ---- Training setup: seeding, device selection, data loading, model build ----
random.seed(args.random_seed)
model_args = args

if torch.cuda.is_available() and not args.gpuid:
    # Fix: the flag is spelled --gpuid (two dashes), matching argparse usage and
    # the identical warning in the evaluation path of this file.
    print("WARNING: You have a CUDA device, should run with --gpuid 0")
if args.gpuid:
    cuda.set_device(args.gpuid[0])
    # Seed the selected CUDA device as well, so GPU-side RNG is reproducible.
    # NOTE(review): nesting the CUDA seed under `if args.gpuid:` follows the
    # conventional order (pick device, then seed it) — confirm against the
    # original (pre-mangling) layout.
    if args.random_seed > 0:
        torch.cuda.manual_seed(args.random_seed)

# Time the data-loading phase for the optional verbose report below.
loading_timer = tm.time()

schema = Schema(model_args.schema_path, None)
data_generator = get_data_generator(args, model_args, schema)
mappings = data_generator.mappings

# --vocab-only mode: building the data generator (above) is the whole job.
if args.vocab_only:
    import sys
    sys.exit()

if args.verbose:
    print("Finished loading and pre-processing data, took {:.1f} seconds".format(tm.time() - loading_timer))

# TODO: load from checkpoint
ckpt = None

# Build the model
model = build_model(model_args, args, mappings, ckpt, model_path=args.agent_checkpoint)
tally_parameters(model)
create_path(args.model_path)
config_path = os.path.join(args.model_path, 'config.json')
# ---- Evaluation setup: recover default model args, load checkpoint, decode ----

# A throwaway parser gives us the full set of model/data-generator defaults;
# parse_known_args([]) yields those defaults without touching real CLI input.
dummy_parser = argparse.ArgumentParser(description='duh')
add_model_arguments(dummy_parser)
add_data_generator_arguments(dummy_parser)
dummy_args = dummy_parser.parse_known_args([])[0]

if cuda.is_available() and not args.gpuid:
    print("WARNING: You have a CUDA device, should run with --gpuid 0")
if args.gpuid:
    cuda.set_device(args.gpuid[0])

# Load the model.
mappings, model, model_args = \
    model_builder.load_test_model(args.checkpoint, args, dummy_args.__dict__)

# Figure out src and tgt vocab
make_model_mappings(model_args.model, mappings)

schema = Schema(model_args.schema_path, None)
data_generator = get_data_generator(args, model_args, schema, test=True)

# Prefix: [GO]
scorer = Scorer(args.alpha)
generator = get_generator(model, mappings['tgt_vocab'], scorer, args, model_args)
builder = UtteranceBuilder(mappings['tgt_vocab'], args.n_best, has_tgt=True)
evaluator = Evaluator(model, mappings, generator, builder, gt_prefix=1)
evaluator.evaluate(args, model_args, data_generator)