def run_one_config(opt, model_type, case_study=False):
    """Train and evaluate one model configuration end-to-end.

    Builds the model named by `model_type` on the data described by `opt`,
    scores it on the test split, logs the metrics together with the
    training time, and finally hands the model to the case-study runner.

    :param opt:         configuration dict/namespace consumed by DataSet,
                        ModelManager and Evaluator
    :param model_type:  string key selecting which model to build
    :param case_study:  forwarded to run_case_study as its enable flag
    """
    # Fix RNG state first so the whole run is reproducible.
    set_random_seeds()

    data = DataSet(opt, model_type)
    manager = ModelManager(opt)
    # build_model returns the trained model plus elapsed training time.
    model, train_time = manager.build_model(model_type, data)

    scorer = Evaluator(opt)
    results = scorer.eval(model, model_type, data.test_loader)
    scorer.write_performance(model_type, results, train_time)

    run_case_study(model, data, opt, case_study)
# --- vocabulary / data files (presumably u = user, v = venue, t = time
#     ids, judging by the emb_dim_* keys below -- confirm against DataSet) ---
'u_vocab_file': path + 'u.txt',
'v_vocab_file': path + 'v.txt',
't_vocab_file': path + 't.txt',
'train_data_file': path + 'train.txt',
'test_data_file': path + 'test.txt',
'coor_nor_file': path + 'coor_nor.txt',
'distance_file': path + 'distance.txt',
'train_log_file': path + 'log.txt',
# NOTE(review): .pk extension suggests a pickle file -- confirm how it is loaded
'candidate_file': path + 'candidate.pk',
'id_offset': 1,
'n_epoch': 80,
'batch_size': 50,
'data_worker': 1,
'load_model': False,
# --- embedding / model sizes (original vs. best values per the author) ---
'emb_dim_d': 16,   # distance embedding dim; best found: 16
'emb_dim_v': 16,   # originally 32, best found: 16
'emb_dim_t': 8,    # original: 8
'emb_dim_u': 32,   # copied from v3; original 32, best 32
'hidden_dim': 16,  # original: 16
'nb_cnt': 16,      # NOTE(review): presumably a neighbour/candidate count -- confirm
'save_gap': 10,
'dropout': 0.5,
# NOTE(review): duplicates 'n_epoch' above; unclear which key the trainer reads
'epoch': 80
}

# Build the dataset and train/evaluate the 'birnnt' model with the options above.
dataset = DataSet(opt)
manager = ModelManager(opt)
model_type = 'birnnt'
manager.build_model(model_type, dataset)
print "evaluate"
manager.evaluate(model_type, dataset)