def main():
    args = KGEArgParser().get_args(sys.argv[1:])

    if Path(args.dataset_path).exists():
        kdl = KnowledgeDataLoader(data_dir=args.dataset_path, negative_sampling=args.sampling)
        kg = kdl.get_knowledge_graph()
        print('Successfully loaded {} triples from {}.'.format(len(kdl.triples), kdl.data_dir))
    else:
        print('Unable to find dataset at path:', args.dataset_path)
        print('Falling back to the Freebase15k dataset with default hyperparameters...')
        kg = KnowledgeGraph()

    kg.prepare_data()
    kg.dump()

    # TODO: Not sure why the new dataset isn't cached on subsequent hits...
    args.dataset_path = './data/' + kg.dataset_name
    args.dataset_name = kg.dataset_name

    # Add new model configurations to run.
    models = [TransE(transe_config(args=args))]

    for model in models:
        print('---- Training Model: {} ----'.format(model.model_name))
        trainer = Trainer(model=model, debug=args.debug)
        trainer.build_model()
        trainer.train_model()
        tf.reset_default_graph()

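# main() above relies on a transe_config() helper that is not defined in this
# file. A minimal sketch of what it might look like, assuming it merely maps
# the parsed arguments onto a TransEConfig (the exact argument fields read from
# `args` are assumptions, not a confirmed signature):
def transe_config(args):
    config = TransEConfig(learning_rate=args.learning_rate,
                          batch_size=args.batch_size,
                          epochs=args.epochs,
                          hidden_size=args.hidden_size)
    config.set_dataset(args.dataset_name)
    return config
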
def test(self):
    """Evaluate the final model on the test set, training with the best
    hyper-parameters on the merged training and validation sets."""
    args = KGEArgParser().get_args([])
    args.model_name = self.model
    args.dataset_name = self.dataset
    args.debug = self.debug

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args=args)

    # Update the config params with the golden hyper-parameters.
    for k, v in self.best.items():
        config.__dict__[k] = v

    if self.debug:
        config.epochs = 1

    model = model_def(config)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model=model)
    trainer.build_model()
    trainer.train_model()

def testing_function(name, distance_measure=None, bilinear=None, display=False):
    """Function to test the models."""
    knowledge_graph = KnowledgeGraph(dataset="freebase15k", negative_sample="uniform")
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(name)

    config = config_def()
    config.epochs = 1
    config.test_step = 1
    config.test_num = 10
    config.disp_result = display
    config.save_model = False

    if distance_measure is not None:
        config.distance_measure = distance_measure
    if bilinear is not None:
        config.bilinear = bilinear

    model = model_def(config)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

    tf.reset_default_graph()

def testing_function(name):
    """Function to test the models with arguments."""
    # Get the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(['-exp', 'True', '-mn', name])

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(name)
    config = config_def(args)
    config.epochs = 1
    config.test_step = 1
    config.test_num = 10
    config.save_model = False
    config.debug = True
    config.ent_hidden_size = 10
    config.rel_hidden_size = 10
    config.channels = 2

    model = model_def(**config.__dict__)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model, config)
    trainer.build_model()
    trainer.train_model()

def main():
    # Get the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(sys.argv[1:])

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name,
                                     negative_sample=args.sampling,
                                     custom_dataset_path=args.dataset_path)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args=args)
    model = model_def(config)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model=model, debug=args.debug)
    trainer.build_model()
    trainer.train_model()

    # All inference can be performed here after training the model.
    trainer.enter_interactive_mode()
    code.interact(local=locals())
    trainer.exit_interactive_mode()

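# Inside the interactive session opened by main() above, queries like the
# following can be issued (the entity/relation ids are placeholders; the
# infer_* calls mirror the ones used elsewhere in this file):
#
#   >>> trainer.infer_tails(1, 10, topk=5)   # head, relation -> top-5 tails
#   >>> trainer.infer_heads(10, 20, topk=5)  # relation, tail -> top-5 heads
#   >>> trainer.infer_rels(1, 20, topk=5)    # head, tail -> top-5 relations
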
def testing_function_with_args(name, distance_measure=None, bilinear=None, display=False):
    """Function to test the models with arguments."""
    # Get the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args([])

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name, negative_sample=args.sampling)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(name)
    config = config_def(args=args)
    config.epochs = 1
    config.test_step = 1
    config.test_num = 10
    config.disp_result = display
    config.save_model = False

    if distance_measure is not None:
        config.distance_measure = distance_measure
    if bilinear is not None:
        config.bilinear = bilinear

    model = model_def(config)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

    tf.reset_default_graph()

def main():
    args = KGEArgParser().get_args(sys.argv[1:])

    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name, custom_dataset_path=args.dataset_path)
    knowledge_graph.prepare_data()

    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args)
    model = model_def(**config.__dict__)

    trainer = Trainer(model, config)
    trainer.build_model()
    trainer.train_model()

def testing_function_with_args(name, l1_flag, distance_measure=None, bilinear=None, display=False):
    """Function to test the models with arguments."""
    tf.reset_default_graph()

    # Get the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args([])

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name, negative_sample=args.sampling)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(name)
    config = config_def(args=args)
    config.epochs = 1
    config.test_step = 1
    config.test_num = 10
    config.disp_result = display
    config.save_model = True
    config.L1_flag = l1_flag

    model = model_def(config)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

    # All inference can be performed here after training the model.
    trainer.enter_interactive_mode()

    # Takes a head and a relation; returns the top-k tail candidates.
    tails = trainer.infer_tails(1, 10, topk=5)
    assert len(tails) == 5

    # Takes a relation and a tail; returns the top-k head candidates.
    heads = trainer.infer_heads(10, 20, topk=5)
    assert len(heads) == 5

    # Takes a head and a tail; returns the top-k relation candidates.
    relations = trainer.infer_rels(1, 20, topk=5)
    assert len(relations) == 5

    trainer.exit_interactive_mode()

def test_DistMult(self):
    config = DistMultConfig(batch_size=512, epochs=1)
    config.set_dataset("Freebase15k")
    config.test_step = 1
    config.test_num = 10
    config.gpu_fraction = 0.4
    config.save_model = False
    config.disp_result = False

    model = DistMult(config)

    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

def main():
    # Get the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(sys.argv[1:])

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name, custom_dataset_path=args.dataset_path)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args)
    model = model_def(**config.__dict__)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model, config)
    trainer.build_model()
    trainer.train_model()

def main():
    args = KGEArgParser().get_args(sys.argv[1:])

    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args)
    model = model_def(**config.__dict__)

    trainer = Trainer(model, config)
    trainer.build_model()

    if config.load_from_data is None:
        trainer.train_model()

    trainer.infer_tails(1, 10, topk=5)
    trainer.infer_heads(10, 20, topk=5)
    trainer.infer_rels(1, 20, topk=5)

def test_SMEB(self):
    config = SMEConfig(batch_size=512, epochs=1, hidden_size=8)
    config.set_dataset("Freebase15k")
    config.test_step = 1
    config.test_num = 10
    config.gpu_fraction = 0.4
    config.save_model = True
    config.disp_result = False
    config.bilinear = True

    model = SME(config)

    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

def test_ProjE(self):
    config = ProjE_pointwiseConfig(learning_rate=0.01, batch_size=512, epochs=1)
    config.set_dataset("Freebase15k")
    config.test_step = 1
    config.test_num = 10
    config.gpu_fraction = 0.4
    config.save_model = False
    config.disp_result = False

    model = ProjE_pointwise(config)

    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

def test_KG2E_EL(self):
    config = KG2EConfig(batch_size=512, epochs=1, distance_measure="expected_likelihood")
    config.set_dataset("Freebase15k")
    config.test_step = 1
    config.test_num = 10
    config.gpu_fraction = 0.4
    config.save_model = False
    config.disp_result = False

    model = KG2E(config)

    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

def test_transH(self):
    config = TransHConfig(batch_size=512, epochs=1, hidden_size=16)
    config.set_dataset("Freebase15k")
    config.test_step = 1
    config.test_num = 10
    config.gpu_fraction = 0.4
    config.save_model = False
    config.disp_result = False
    config.C = 0.125
    config.sampling = "bern"

    model = TransH(config)

    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

def test_TransD(self):
    config = TransDConfig(batch_size=512, epochs=1, ent_hidden_size=8, rel_hidden_size=8)
    config.set_dataset("Freebase15k")
    config.test_step = 1
    config.test_num = 10
    config.gpu_fraction = 0.4
    config.save_model = False
    config.disp_result = False

    model = TransD(config)

    trainer = Trainer(model=model, debug=True)
    trainer.build_model()
    trainer.train_model()

def experiment(model_name):
    args = KGEArgParser().get_args([])
    args.exp = True
    args.dataset_name = "fb15k"

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name, custom_dataset_path=args.dataset_path)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(model_name)
    config = config_def(args)
    model = model_def(**config.__dict__)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model, config)
    trainer.build_model()
    trainer.train_model()

def main():
    # Get the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(sys.argv[1:])

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args)
    model = model_def(**config.__dict__)

    # Create the model and load the trained weights.
    trainer = Trainer(model, config)
    trainer.build_model()

    if config.load_from_data is None:
        trainer.train_model()

    trainer.infer_tails(1, 10, topk=5)
    trainer.infer_heads(10, 20, topk=5)
    trainer.infer_rels(1, 20, topk=5)

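# A typical invocation of the main() above, assuming the file is saved as
# inference.py (-mn and -ds are the same flags passed to KGEArgParser elsewhere
# in this file):
#
#   python inference.py -mn TransE -ds Freebase15k
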
def test_early_stopping_on_ranks(tmpdir, monitor):
    result_path_dir = tmpdir.mkdir("result_path")
    configured_epochs = 10
    model, config = get_model(result_path_dir, configured_epochs, 0, "complex")

    trainer = Trainer(model, config)
    trainer.build_model(monitor=monitor)
    actual_epochs = trainer.train_model()

    assert actual_epochs < configured_epochs - 1

def test_full_epochs(tmpdir, config_key):
    result_path_dir = tmpdir.mkdir("result_path")
    configured_epochs = 10
    model, config = get_model(result_path_dir, configured_epochs, -1, config_key)

    trainer = Trainer(model, config)
    trainer.build_model()
    actual_epochs = trainer.train_model()

    assert actual_epochs == configured_epochs - 1

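# Both tests above rely on a get_model() helper that is not defined here. A
# minimal sketch, assuming patience drives early stopping (-1 disabling it, as
# test_full_epochs suggests) and the model is resolved through Importer(); the
# config fields mirror ones set elsewhere in this file, and the body is an
# assumption rather than the actual fixture:
def get_model(result_path_dir, configured_epochs, patience, config_key):
    args = KGEArgParser().get_args([])
    config_def, model_def = Importer().import_model_config(config_key)
    config = config_def(args=args)
    config.epochs = configured_epochs
    config.patience = patience
    config.test_step = 1
    config.test_num = 1
    config.disp_result = False
    config.save_model = False
    config.debug = True
    config.path_result = result_path_dir
    return model_def(**config.__dict__), config
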
def run_pykg2vec():
    # Get the customized configurations from the command-line arguments.
    args = PyKG2VecArgParser().get_args(sys.argv[1:])
    args.dataset_path = preprocess(args.triples_path, args.dataset_name)

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name,
                                     negative_sample=args.sampling,
                                     custom_dataset_path=args.dataset_path)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args=args)
    model = model_def(config)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model=model, debug=args.debug)
    trainer.build_model()
    trainer.train_model()

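# run_pykg2vec() above assumes a preprocess() helper that turns a raw triples
# file into the train/valid/test layout expected for a custom dataset. A
# minimal sketch under that assumption; the 80/10/10 split and the
# "<dataset>-<split>.txt" naming are guesses, not a confirmed pykg2vec contract:
def preprocess(triples_path, dataset_name):
    import random
    from pathlib import Path

    out_dir = Path(triples_path).parent / dataset_name
    out_dir.mkdir(parents=True, exist_ok=True)

    with open(triples_path) as f:
        triples = f.readlines()
    random.shuffle(triples)

    n = len(triples)
    splits = {
        'train': triples[:int(0.8 * n)],
        'valid': triples[int(0.8 * n):int(0.9 * n)],
        'test': triples[int(0.9 * n):],
    }
    for split, rows in splits.items():
        with open(out_dir / '{}-{}.txt'.format(dataset_name, split), 'w') as out:
            out.writelines(rows)
    return str(out_dir)
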
def testing_function(name, distance_measure=None, bilinear=None, display=False,
                     ent_hidden_size=None, rel_hidden_size=None, channels=None):
    """Function to test the models with arguments."""
    # Get the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(['-exp', 'True'])

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(name)
    config = config_def(args)
    config.epochs = 1
    config.test_step = 1
    config.test_num = 10
    config.disp_result = display
    config.save_model = False
    config.debug = True

    if distance_measure is not None:
        config.distance_measure = distance_measure
    if bilinear is not None:
        config.bilinear = bilinear
    if ent_hidden_size:
        config.ent_hidden_size = ent_hidden_size
    if rel_hidden_size:
        config.rel_hidden_size = rel_hidden_size
    if channels:
        config.channels = channels

    model = model_def(config)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model=model)
    trainer.build_model()
    trainer.train_model()

def test_visualization(tmpdir):
    result_path_dir = tmpdir.mkdir("result_path")

    args = KGEArgParser().get_args([])

    knowledge_graph = KnowledgeGraph(dataset="Freebase15k")
    knowledge_graph.prepare_data()

    config_def, model_def = Importer().import_model_config("analogy")
    config = config_def(args=args)
    config.epochs = 5
    config.test_step = 1
    config.test_num = 1
    config.disp_result = True
    config.save_model = False
    config.debug = True
    config.patience = -1
    config.plot_embedding = True
    config.plot_training_result = True
    config.plot_testing_result = True
    config.path_figures = result_path_dir
    config.path_result = result_path_dir

    trainer = Trainer(model_def(**config.__dict__), config)
    trainer.build_model()
    trainer.train_model()

    files = listdir(result_path_dir)
    assert any("_entity_plot" in f for f in files)
    assert any("_rel_plot" in f for f in files)
    assert any("_ent_n_rel_plot" in f for f in files)
    assert any("_training_loss_plot_" in f for f in files)
    assert any("_testing_hits_plot" in f for f in files)
    assert any("_testing_latex_table_" in f for f in files)
    assert any("_testing_table_" in f for f in files)
    assert any("_testing_rank_plot_" in f for f in files)
    assert any("_testing_hits_plot_" in f for f in files)

def main():
    # Get the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args(sys.argv[1:])

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name, negative_sample=args.sampling)
    knowledge_graph.prepare_data()

    sess_infer = tf.InteractiveSession()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args=args)
    model = model_def(config)

    # Create, compile, and train the model. Several evaluations are performed during training.
    trainer = Trainer(model=model, debug=args.debug)
    trainer.build_model()
    trainer.train_model()

    # All inference can be performed here after training the model.
    # Takes a head and a relation; returns the top-k tail candidates.
    trainer.infer_tails(1, 10, sess_infer, topk=5)
    # Takes a relation and a tail; returns the top-k head candidates.
    trainer.infer_heads(10, 20, sess_infer, topk=5)

    sess_infer.close()

def main():
    model_name = "transe"
    dataset_name = "Freebase15k"

    # 1. Tune the hyper-parameters for the selected model and dataset,
    #    using the training and validation sets.
    args = KGETuneArgParser().get_args(['-mn', model_name, '-ds', dataset_name])

    # Initialize the Bayesian optimizer and prepare the data.
    bays_opt = BaysOptimizer(args=args)

    # Perform the golden hyper-parameter tuning.
    bays_opt.optimize()
    best = bays_opt.return_best()

    # 2. Evaluate the final model on the test set using the best hyper-parameters found.
    args = KGEArgParser().get_args(['-mn', model_name, '-ds', dataset_name])

    # Prepare the data and cache it for later use.
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
    knowledge_graph.prepare_data()

    # Extract the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(args.model_name.lower())
    config = config_def(args)

    # Update the config params with the golden hyper-parameters.
    for k, v in best.items():
        config.__dict__[k] = v

    model = model_def(**config.__dict__)

    # Create, compile, and train the model.
    trainer = Trainer(model, config)
    trainer.build_model()
    trainer.train_model()

def experiment():
    # Prepare the dataset.
    knowledge_graph = DataPrep('Freebase15k')

    # Shared settings.
    epochs = 5
    batch_size = 128
    learning_rate = 0.01
    hidden_size = 50

    transEconfig = TransEConfig(learning_rate=learning_rate, batch_size=batch_size,
                                epochs=epochs, hidden_size=hidden_size)
    transHconfig = TransHConfig(learning_rate=learning_rate, batch_size=batch_size,
                                epochs=epochs, hidden_size=hidden_size)
    transRconfig = TransRConfig(learning_rate=learning_rate, batch_size=batch_size,
                                ent_hidden_size=64, rel_hidden_size=32, epochs=epochs)
    rescalconfig = RescalConfig(learning_rate=0.1, batch_size=batch_size,
                                epochs=epochs, hidden_size=hidden_size)
    smeconfig = SMEConfig(learning_rate=learning_rate, batch_size=batch_size,
                          epochs=epochs, hidden_size=hidden_size)

    configs = [transEconfig, transHconfig, transRconfig, rescalconfig, smeconfig]
    for config in configs:
        config.test_step = 2
        config.test_num = 100
        config.save_model = True
        config.disp_result = False

    # Prepare the models.
    models = [
        TransE(transEconfig, knowledge_graph),
        TransH(transHconfig, knowledge_graph),
        TransR(transRconfig, knowledge_graph),
        Rescal(rescalconfig, knowledge_graph),
        SMEBilinear(smeconfig, knowledge_graph),
        SMELinear(smeconfig, knowledge_graph),
    ]

    # Train the models.
    for model in models:
        print("training model %s" % model.model_name)
        trainer = Trainer(model=model)
        trainer.build_model()
        trainer.train_model()
        trainer.full_test()
        tf.reset_default_graph()