def test_generator_pairwise():
    """Verify the pairwise-training generator yields six aligned batch arrays."""
    kg = KnowledgeGraph(dataset="freebase15k")
    kg.force_prepare_data()

    config_def, model_def = Importer().import_model_config('transe')
    config = config_def(KGEArgParser().get_args([]))

    generator = Generator(model_def(**config.__dict__), config)
    generator.start_one_epoch(10)
    for _ in range(10):
        batch = list(next(generator))
        assert len(batch) == 6
        # Positive (h, r, t) and negative (h, r, t) components.
        ph, pr, pt, nh, nr, nt = batch
        # Every array in the batch must be the same length as the positive heads.
        for component in (pr, pt, nh, nr, nt):
            assert len(ph) == len(component)
    generator.stop()
def __init__(self, args):
    """Store the tuning setup: dataset, model classes, and search space.

    Args:
        args: parsed command-line namespace; must provide ``model_name``,
            ``dataset_name``, ``dataset_path``, ``debug``, ``device`` and
            ``max_number_trials``.

    Raises:
        Exception: if the chosen model does not support hyperparameter tuning.
    """
    if args.model_name.lower() in [
            "tucker", "conve", "convkb", "proje_pointwise"
    ]:
        # BUG FIX: the parsed namespace has no ``model`` attribute -- the
        # original ``args.model`` raised AttributeError instead of this message.
        raise Exception(
            "Model %s has not been supported in tuning hyperparameters!"
            % args.model_name)

    self.model_name = args.model_name
    self.knowledge_graph = KnowledgeGraph(
        dataset=args.dataset_name, custom_dataset_path=args.dataset_path)

    # Start from default KGE arguments, then overlay the tuner's settings.
    self.kge_args = KGEArgParser().get_args([])
    self.kge_args.dataset_name = args.dataset_name
    self.kge_args.debug = args.debug
    self.kge_args.device = args.device

    # Debug runs are capped at 3 trials so they finish quickly.
    self.max_evals = args.max_number_trials if not args.debug else 3

    self.config_obj, self.model_obj = Importer().import_model_config(
        self.model_name.lower())
    self.config_local = self.config_obj(self.kge_args)
    self.search_space = HyperparameterLoader(args).load_search_space(
        self.model_name.lower())
    self._best_result = None
    self.trainer = None
def test_inference_on_pretrained_model():
    """Load the bundled pretrained TransE model and run all inference modes."""
    args = KGEArgParser().get_args([])
    config_def, model_def = Importer().import_model_config("transe")
    config = config_def(args)
    config.load_from_data = os.path.join(
        os.path.dirname(__file__), "resource", "pretrained", "TransE",
        Trainer.TRAINED_MODEL_FILE_NAME)
    model = model_def(**config.__dict__)

    # Building the trainer loads the stored weights from load_from_data.
    trainer = Trainer(model, config)
    trainer.build_model()

    # Each call asks for the top-5 candidates.
    assert len(trainer.infer_tails(1, 10, topk=5)) == 5   # head, relation -> tails
    assert len(trainer.infer_heads(10, 20, topk=5)) == 5  # relation, tail -> heads
    assert len(trainer.infer_rels(1, 20, topk=5)) == 5    # head, tail -> relations
def testing_function(name):
    """Train the named model for a single debug epoch on the default dataset."""
    # Parse CLI-style arguments with the experiment flag enabled.
    args = KGEArgParser().get_args(['-exp', 'True', '-mn', name])

    # Download/cache the dataset before training.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
    knowledge_graph.prepare_data()

    # Resolve the model's config and implementation classes.
    config_def, model_def = Importer().import_model_config(name)
    config = config_def(args)

    # Shrink every knob so the run completes quickly.
    overrides = {
        "epochs": 1,
        "test_step": 1,
        "test_num": 10,
        "save_model": False,
        "debug": True,
        "ent_hidden_size": 10,
        "rel_hidden_size": 10,
        "channels": 2,
    }
    for key, value in overrides.items():
        setattr(config, key, value)

    # Train; evaluation runs every test_step epochs.
    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()
    trainer.train_model()
def test_error_on_building_pretrained_model():
    """Building a trainer from a nonexistent checkpoint must raise ValueError."""
    with pytest.raises(ValueError) as excinfo:
        args = KGEArgParser().get_args([])
        config_def, model_def = Importer().import_model_config("transe")
        config = config_def(args)
        config.load_from_data = "pretrained-model-does-not-exist"
        trainer = Trainer(model_def(**config.__dict__), config)
        trainer.build_model()
    assert "Cannot load model from %s" % config.load_from_data in str(excinfo)
def main():
    """Train the model selected on the command line on the requested dataset."""
    args = KGEArgParser().get_args(sys.argv[1:])

    # Fetch and cache the dataset (custom path takes precedence if given).
    kg = KnowledgeGraph(dataset=args.dataset_name,
                        custom_dataset_path=args.dataset_path)
    kg.prepare_data()

    # Resolve the model's config/implementation pair and instantiate both.
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args)

    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()
    trainer.train_model()
def main():
    """Run tail/head/relation inference with a model chosen on the command line."""
    args = KGEArgParser().get_args(sys.argv[1:])

    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args)

    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()

    trainer.infer_tails(1, 10, topk=5)   # head, relation -> top-5 tails
    trainer.infer_heads(10, 20, topk=5)  # relation, tail -> top-5 heads
    trainer.infer_rels(1, 20, topk=5)    # head, tail -> top-5 relations
def main():
    """Evaluate a trained model on the full test set."""
    args = KGEArgParser().get_args(sys.argv[1:])

    config_def, model_def = Importer().import_model_config(
        args.model_name.lower())
    config = config_def(args)

    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()

    # Switch to evaluation mode before scoring.
    # NOTE(review): 0 looks like an epoch/result tag for full_test -- confirm.
    trainer.model.eval()
    trainer.evaluator.full_test(0)
def testing_function_with_args(name, l1_flag, distance_measure=None, bilinear=None, display=False):
    """Train a model briefly, then exercise interactive inference on it.

    Args:
        name: registry key of the model under test.
        l1_flag: value assigned to ``config.L1_flag``.
        distance_measure: unused here; kept for signature compatibility.
        bilinear: unused here; kept for signature compatibility.
        display: when True, intermediate results are displayed.
    """
    args = KGEArgParser().get_args([])

    # Prepare and cache the default dataset.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
    knowledge_graph.prepare_data()

    config_def, model_def = Importer().import_model_config(name)
    config = config_def(args)

    # Tiny run; the model is saved so inference can use it afterwards.
    config.epochs = 1
    config.test_step = 1
    config.test_num = 10
    config.disp_result = display
    config.save_model = True
    config.L1_flag = l1_flag
    config.debug = True

    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()
    trainer.train_model()

    # Inference is available once training has produced a model.
    trainer.enter_interactive_mode()
    assert len(trainer.infer_tails(1, 10, topk=5)) == 5   # head, relation -> tails
    assert len(trainer.infer_heads(10, 20, topk=5)) == 5  # relation, tail -> heads
    assert len(trainer.infer_rels(1, 20, topk=5)) == 5    # head, tail -> relations
    trainer.exit_interactive_mode()
def main():
    """Load a trained model from CLI arguments and run every inference mode."""
    # Getting the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(sys.argv[1:])

    # Extracting the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(
        args.model_name.lower())
    config = config_def(args)
    model = model_def(**config.__dict__)

    # Create the model and load the trained weights.
    trainer = Trainer(model, config)
    trainer.build_model()

    for infer, pair in ((trainer.infer_tails, (1, 10)),
                        (trainer.infer_heads, (10, 20)),
                        (trainer.infer_rels, (1, 20))):
        infer(*pair, topk=5)
def main():
    """End-to-end entry point: prepare data, build the model, and train."""
    # Getting the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(sys.argv[1:])

    # Preparing data and cache the data for later usage.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name,
                                     custom_dataset_path=args.dataset_path)
    knowledge_graph.prepare_data()

    # Extracting the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args)
    model = model_def(**config.__dict__)

    # Train; several evaluations are performed along the way.
    trainer = Trainer(model, config)
    trainer.build_model()
    trainer.train_model()
def experiment(model_name):
    """Run a full benchmark-style training of ``model_name`` on FB15k."""
    args = KGEArgParser().get_args([])
    args.exp = True               # use the published experiment settings
    args.dataset_name = "fb15k"

    # Prepare and cache the dataset.
    kg = KnowledgeGraph(dataset=args.dataset_name,
                        custom_dataset_path=args.dataset_path)
    kg.prepare_data()

    # Resolve and instantiate the model's config and implementation.
    config_def, model_def = Importer().import_model_config(model_name)
    config = config_def(args)

    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()
    trainer.train_model()
def get_model(result_path_dir, configured_epochs, patience, config_key):
    """Build a debug-sized (model, config) pair for early-stopping tests.

    Args:
        result_path_dir: directory where result artifacts are written.
        configured_epochs: number of epochs to configure.
        patience: early-stopping patience (-1 disables it).
        config_key: registry key of the model to build.

    Returns:
        Tuple of the instantiated model and its configuration.
    """
    args = KGEArgParser().get_args([])

    knowledge_graph = KnowledgeGraph(dataset="Freebase15k")
    knowledge_graph.prepare_data()

    config_def, model_def = Importer().import_model_config(config_key)
    config = config_def(args)

    # Minimal settings so a test run completes quickly.
    config.epochs = configured_epochs
    config.test_step = 1
    config.test_num = 1
    config.disp_result = False
    config.save_model = False
    config.path_result = result_path_dir
    config.debug = True
    config.patience = patience

    return model_def(**config.__dict__), config
def test_generator_pointwise():
    """Verify the pointwise generator yields h/r/t arrays plus +/-1 labels."""
    kg = KnowledgeGraph(dataset="freebase15k")
    kg.force_prepare_data()

    config_def, model_def = Importer().import_model_config("complex")
    config = config_def(KGEArgParser().get_args([]))

    generator = Generator(model_def(**config.__dict__), config)
    generator.start_one_epoch(10)
    for _ in range(10):
        batch = list(next(generator))
        assert len(batch) == 4
        h, r, t, y = batch
        assert len(h) == len(r)
        assert len(h) == len(t)
        # Labels are exactly the two classes: +1 positives, -1 negatives.
        assert set(y) == {1, -1}
    generator.stop()
def test_visualization(tmpdir):
    """Train briefly with every plot switch on and verify the output files."""
    result_path_dir = tmpdir.mkdir("result_path")

    args = KGEArgParser().get_args([])
    knowledge_graph = KnowledgeGraph(dataset="Freebase15k")
    knowledge_graph.prepare_data()

    config_def, model_def = Importer().import_model_config("analogy")
    config = config_def(args=args)

    # Short run with all visualization outputs enabled.
    config.epochs = 5
    config.test_step = 1
    config.test_num = 1
    config.disp_result = True
    config.save_model = False
    config.debug = True
    config.patience = -1
    config.plot_embedding = True
    config.plot_training_result = True
    config.plot_testing_result = True
    config.path_figures = result_path_dir
    config.path_result = result_path_dir

    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()
    trainer.train_model()

    # Every expected artifact must appear in the result directory.
    files = list(listdir(result_path_dir))
    expected_fragments = (
        "_entity_plot",
        "_rel_plot",
        "_ent_n_rel_plot",
        "_training_loss_plot_",
        "_testing_hits_plot",
        "_testing_latex_table_",
        "_testing_table_",
        "_testing_rank_plot_",
        "_testing_hits_plot_",
    )
    for fragment in expected_fragments:
        assert any(fragment in f for f in files)
def load_model(self, model_path=None):
    """Function to load the model.

    Restores a trained model (weights + pickled config) from disk and
    replaces ``self.config`` and ``self.model`` with the loaded versions.

    Args:
        model_path: directory holding the trained model files; when None,
            the default temp path for the current model name is used.

    Raises:
        ValueError: if either the weights file or the config file is missing.
    """
    if model_path is None:
        # Default location: <path_tmp>/<model_name>/<trained-file-names>.
        model_path_file = self.config.path_tmp / self.model.model_name / self.TRAINED_MODEL_FILE_NAME
        model_path_config = self.config.path_tmp / self.model.model_name / self.TRAINED_MODEL_CONFIG_NAME
    else:
        model_path = Path(model_path)
        model_path_file = model_path / self.TRAINED_MODEL_FILE_NAME
        model_path_config = model_path / self.TRAINED_MODEL_CONFIG_NAME
    if model_path_file.exists() and model_path_config.exists():
        # The config was stored via np.save on a pickled object; .item()
        # unwraps the 0-d object array back into the config instance.
        config_temp = np.load(model_path_config, allow_pickle=True).item()
        # Keep the caller's load_from_data so the restored config still
        # points at the same on-disk location.
        config_temp.__dict__['load_from_data'] = self.config.__dict__[
            'load_from_data']
        self.config = config_temp
        # Re-import the model class named in the stored config and rebuild
        # it with the stored hyperparameters before loading the weights.
        _, model_def = Importer().import_model_config(
            self.config.model_name.lower())
        self.model = model_def(**self.config.__dict__)
        self.model.load_state_dict(torch.load(str(model_path_file)))
        self.model.eval()
    else:
        raise ValueError("Cannot load model from %s" % model_path_file)
def main():
    """Tune TransE hyperparameters on Freebase15k, then train with the best set."""
    model_name = "transe"
    dataset_name = "Freebase15k"

    # 1. Bayesian hyperparameter search (uses training and validation sets).
    tune_args = KGETuneArgParser().get_args(
        ['-mn', model_name, '-ds', dataset_name])
    bays_opt = BaysOptimizer(args=tune_args)
    bays_opt.optimize()
    best = bays_opt.return_best()

    # 2. Retrain with the winning hyperparameters for final evaluation.
    args = KGEArgParser().get_args(['-mn', model_name, '-ds', dataset_name])

    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
    knowledge_graph.prepare_data()

    config_def, model_def = Importer().import_model_config(
        args.model_name.lower())
    config = config_def(args)

    # Overwrite the defaults with the tuned values.
    for key, value in best.items():
        config.__dict__[key] = value

    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()
    trainer.train_model()
def test_generator_proje():
    """Verify the projection generator yields h/r/t plus tensor-valued maps."""
    kg = KnowledgeGraph(dataset="freebase15k")
    kg.force_prepare_data()

    config_def, model_def = Importer().import_model_config("proje_pointwise")
    config = config_def(KGEArgParser().get_args([]))

    generator = Generator(model_def(**config.__dict__), config)
    generator.start_one_epoch(10)
    for _ in range(10):
        batch = list(next(generator))
        assert len(batch) == 5
        h, r, t, hr_t, tr_h = batch
        assert len(h) == len(r)
        assert len(h) == len(t)
        # The hr->t and tr->h maps arrive as torch tensors.
        assert isinstance(hr_t, torch.Tensor)
        assert isinstance(tr_h, torch.Tensor)
    generator.stop()
config_def, model_def = Importer().import_model_config(config_key) config = config_def(args) config.epochs = configured_epochs config.test_step = 1 config.test_num = 1 config.disp_result = False config.save_model = False config.path_result = result_path_dir config.debug = True config.patience = patience return model_def(**config.__dict__), config @pytest.mark.parametrize("config_key", list(Importer().modelMap.keys())) def test_full_epochs(tmpdir, config_key): result_path_dir = tmpdir.mkdir("result_path") configured_epochs = 10 model, config = get_model(result_path_dir, configured_epochs, -1, config_key) trainer = Trainer(model, config) trainer.build_model() actual_epochs = trainer.train_model() assert actual_epochs == configured_epochs - 1 @pytest.mark.parametrize("monitor", [ Monitor.MEAN_RANK, Monitor.FILTERED_MEAN_RANK, Monitor.MEAN_RECIPROCAL_RANK,
def test_error_on_importing_model():
    """Importing an unregistered model name must raise a descriptive ValueError."""
    with pytest.raises(ValueError) as excinfo:
        Importer().import_model_config("unknown")
    assert "unknown model has not been implemented. please select from" in str(excinfo)
config_def, model_def = Importer().import_model_config(config_key) config = config_def(args) config.epochs = configured_epochs config.test_step = 1 config.test_num = 1 config.disp_result = False config.save_model = False config.path_result = result_path_dir config.debug = True config.patience = patience return model_def(**config.__dict__), config @pytest.mark.parametrize("config_key", filter(lambda x: x != "conve" and x != "convkb" and x != "transg", list(Importer().modelMap.keys()))) def test_full_epochs(tmpdir, config_key): result_path_dir = tmpdir.mkdir("result_path") configured_epochs = 10 model, config = get_model(result_path_dir, configured_epochs, -1, config_key) trainer = Trainer(model, config) trainer.build_model() actual_epochs = trainer.train_model() assert actual_epochs == configured_epochs - 1 @pytest.mark.parametrize("monitor", [ Monitor.MEAN_RANK, Monitor.FILTERED_MEAN_RANK, Monitor.MEAN_RECIPROCAL_RANK,