def print_eval(prepare_data_fun, out_label):
    """Evaluate the best saved snapshot on one data split and dump predictions.

    Loads ``best_model.pth`` from the module-level ``snapshot_dir``, builds the
    evaluation dataset via *prepare_data_fun*, runs the model, and writes both a
    pickle and a JSON prediction file tagged with *out_label*.

    NOTE(review): relies on module-level ``snapshot_dir``, ``cfg`` and project
    helpers (``build_model``, ``run_model``, ``print_result``) being in scope —
    confirm against the rest of the file.
    """
    best_ckpt = os.path.join(snapshot_dir, "best_model.pth")
    pkl_path = os.path.join(snapshot_dir, "best_model_predict_%s.pkl" % out_label)
    json_path = os.path.join(snapshot_dir, "best_model_predict_%s.json" % out_label)

    # Build the evaluation split; cfg supports both mapping and attribute access.
    eval_set = prepare_data_fun(**cfg['data'], **cfg['model'], verbose=True)
    eval_loader = DataLoader(
        eval_set,
        shuffle=False,  # keep question order stable for result dumping
        batch_size=cfg.data.batch_size,
        num_workers=cfg.data.num_workers,
    )
    answers = eval_set.answer_dict

    # Restore the best checkpoint and switch to inference mode.
    net = build_model(cfg, eval_set)
    net.load_state_dict(torch.load(best_ckpt)['state_dict'])
    net.eval()

    qids, softmax_scores = run_model(net, eval_loader, answers.UNK_idx)
    print_result(
        qids,
        softmax_scores,
        answers,
        json_path,
        json_only=False,
        pkl_res_file=pkl_path,
    )
def print_eval(prepare_data_fun, out_label):
    """Run the best checkpoint over the split from *prepare_data_fun* and save predictions.

    Writes ``best_model_predict_<out_label>.pkl`` and ``.json`` into the
    module-level ``snapshot_dir``.

    NOTE(review): this is a verbatim second definition of ``print_eval`` — it
    shadows the earlier one in this file; consider removing one copy.
    """
    # Output locations, all rooted at the snapshot directory.
    checkpoint_path = os.path.join(snapshot_dir, "best_model.pth")
    pickle_out = os.path.join(snapshot_dir, "best_model_predict_%s.pkl" % out_label)
    json_out = os.path.join(snapshot_dir, "best_model_predict_%s.json" % out_label)

    dataset = prepare_data_fun(**cfg["data"], **cfg["model"], verbose=True)
    answer_vocab = dataset.answer_dict
    loader = DataLoader(
        dataset,
        shuffle=False,
        batch_size=cfg.data.batch_size,
        num_workers=cfg.data.num_workers,
    )

    # Rebuild the architecture, then load the trained weights into it.
    network = build_model(cfg, dataset)
    state = torch.load(checkpoint_path)["state_dict"]
    network.load_state_dict(state)
    network.eval()

    question_ids, scores = run_model(network, loader, answer_vocab.UNK_idx)
    print_result(
        question_ids,
        scores,
        answer_vocab,
        json_out,
        json_only=False,
        pkl_res_file=pickle_out,
    )
# Ensemble evaluation: run several trained models (config_files / model_pths
# pairs) over one shared evaluation data set.
# NOTE(review): this chunk appears truncated — `accumulated_softmax` is
# initialized but never updated and `n_model` is never incremented here;
# the loop body presumably continues past the visible end of this file chunk.

# The first config defines the shared data set; later configs only rebuild models.
config_file = config_files[0]
with open(config_file, "r") as f:
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input — prefer yaml.safe_load; confirm config trust.
    config = yaml.load(f)
batch_size = config["data"]["batch_size"]

data_set_test = prepare_eval_data_set(**config["data"], **config["model"], verbose=True)
# shuffle=False keeps question order stable across all ensemble members.
data_reader_test = DataLoader(data_set_test, shuffle=False, batch_size=batch_size, num_workers=5)
ans_dic = data_set_test.answer_dict

accumulated_softmax = None  # presumably accumulates per-model softmax — not updated in this chunk
final_result = {}
n_model = 0
for c_file, model_file in zip(config_files, model_pths):
    # Each member may have its own config (architecture hyper-parameters).
    with open(c_file, "r") as f:
        config = yaml.load(f)
    myModel = build_model(config, data_set_test)
    myModel.load_state_dict(torch.load(model_file)["state_dict"])
    question_ids, soft_max_result = run_model(myModel, data_reader_test, ans_dic.UNK_idx)
    if n_model == 0:
        final_result = soft_max_result