def test_early_stopping(self):
    """Tests early stopping."""
    # Set automatic_memory_optimization to false during testing
    nations = Nations()
    model: Model = TransE(triples_factory=nations.training)
    evaluator = RankBasedEvaluator(automatic_memory_optimization=False)
    stopper = EarlyStopper(
        model=model,
        evaluator=evaluator,
        training_triples_factory=nations.training,
        evaluation_triples_factory=nations.validation,
        patience=self.patience,
        relative_delta=self.relative_delta,
        metric='mean_rank',
    )
    training_loop = SLCWATrainingLoop(
        model=model,
        triples_factory=nations.training,
        optimizer=Adam(params=model.get_grad_params()),
        automatic_memory_optimization=False,
    )
    losses = training_loop.train(
        triples_factory=nations.training,
        num_epochs=self.max_num_epochs,
        batch_size=self.batch_size,
        stopper=stopper,
        use_tqdm=False,
    )
    self.assertEqual(stopper.number_results, len(losses) // stopper.frequency)
    self.assertEqual(self.stop_epoch, len(losses), msg='Did not stop early as expected')
def _fit_transform(
    self,
    graph: Graph,
    return_dataframe: bool = True,
    verbose: bool = True,
) -> Union[np.ndarray, pd.DataFrame, Dict[str, np.ndarray], Dict[str, pd.DataFrame]]:
    """Return the node embedding computed with the PyKEEN model."""
    torch_device = torch.device(self._device)

    triples_factory = CoreTriplesFactory(
        torch.IntTensor(graph.get_directed_edge_triples_ids().astype(np.int32)),
        num_entities=graph.get_number_of_nodes(),
        num_relations=graph.get_number_of_edge_types(),
        entity_ids=graph.get_node_ids(),
        relation_ids=graph.get_unique_edge_type_ids(),
        create_inverse_triples=False,
    )

    # Cap the batch size at the number of directed edges in the graph.
    batch_size = min(
        self._batch_size,
        graph.get_number_of_directed_edges(),
    )

    model = self._build_model(triples_factory)
    if not isinstance(model, Model):
        raise NotImplementedError(
            f"The `_build_model` method of the child class {self.__class__.__name__} "
            f"for the model {self.model_name()} in the library {self.library_name()} "
            f"did not return a PyKEEN model, but an object of type {type(model)}."
        )

    # Move the model to the GPU if one was requested.
    model.to(torch_device)

    training_loop = SLCWATrainingLoop(
        model=model,
        triples_factory=triples_factory,
    )
    training_loop.train(
        triples_factory=triples_factory,
        num_epochs=self._epochs,
        batch_size=batch_size,
        use_tqdm=True,
        use_tqdm_batch=True,
        tqdm_kwargs=dict(disable=not verbose),
    )

    # Extract and return the embedding.
    return self._extract_embeddings(graph, model, return_dataframe=return_dataframe)
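# As the NotImplementedError above implies, a concrete child class only needs to
# implement `_build_model` and return a PyKEEN `Model`. A minimal sketch of such
# a child class follows; the names `TransEEmbedder`, `BasePyKEENEmbedder`,
# `self._embedding_size`, and `self._random_state` are illustrative assumptions,
# not identifiers from the library.
from pykeen.models import TransE


class TransEEmbedder(BasePyKEENEmbedder):  # hypothetical base class hosting _fit_transform

    def _build_model(self, triples_factory: CoreTriplesFactory) -> TransE:
        """Return a PyKEEN TransE model, satisfying the Model check in _fit_transform."""
        return TransE(
            triples_factory=triples_factory,
            embedding_dim=self._embedding_size,  # assumed attribute set in __init__
            random_seed=self._random_state,      # assumed attribute set in __init__
        )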
def test_train_slcwa(self) -> None:
    """Test that sLCWA training does not fail."""
    loop = SLCWATrainingLoop(
        model=self.model,
        optimizer=Adagrad(params=self.model.get_grad_params(), lr=0.001),
    )
    losses = self._safe_train_loop(
        loop,
        num_epochs=self.train_num_epochs,
        batch_size=self.train_batch_size,
        sampler=self.sampler,
    )
    self.assertIsInstance(losses, list)
# `training` and `testing` are the TriplesFactory splits of the dataset in use.
# Initialize all 26 entity embeddings to the same 3-dimensional vector [1, 2, 1].
np_array = np.array([[1, 2, 1]] * 26)
entity_initializer = lambda t: torch.as_tensor(np_array, dtype=torch.float32)

model = RGCN(
    triples_factory=training,
    entity_representations=entity_initializer,
)
optimizer = Adam(params=model.get_grad_params())
training_loop = SLCWATrainingLoop(
    model=model,
    triples_factory=training,
    optimizer=optimizer,
)

# train
training_loop.train(
    triples_factory=training,
    num_epochs=500,
    batch_size=256,
)

# evaluate
evaluator = RankBasedEvaluator(ks=[50])
mapped_triples = testing.mapped_triples
foo = evaluator.evaluate(
    model=model,
    mapped_triples=mapped_triples,
)
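# The rank-based metrics can then be read off the returned results object.
# A minimal sketch, assuming a PyKEEN version where `MetricResults.get_metric`
# and `RankBasedMetricResults.to_df` are available; exact metric names vary
# between versions.
print(foo.get_metric('mean_rank'))  # mean rank over the test triples
print(foo.get_metric('hits@50'))    # hits@k for the configured ks=[50]
print(foo.to_df())                  # all computed metrics as a pandas DataFrame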