Example #1
    def setUp(self):
        """Prepare for testing the evaluation filtering."""
        self.evaluator = RankBasedEvaluator(
            filtered=True, automatic_memory_optimization=False)
        self.triples_factory = Nations().training
        self.model = FixedModel(triples_factory=self.triples_factory)

        # The FixedModel gives the highest score to the highest entity id
        max_score = self.triples_factory.num_entities - 1

        # The test triples are created to yield the third highest score on both head and tail prediction
        self.test_triples = torch.tensor([[max_score - 2, 0, max_score - 2]])

        # Write new mapped triples to the model, since the model's triples will be used to filter
        # These triples are created to yield the highest score on both head and tail prediction for the
        # test triple at hand
        self.training_triples = torch.tensor([
            [max_score - 2, 0, max_score],
            [max_score, 0, max_score - 2],
        ])

        # The validation triples are created to yield the second highest score on both head and tail prediction for the
        # test triple at hand
        self.validation_triples = torch.tensor([
            [max_score - 2, 0, max_score - 1],
            [max_score - 1, 0, max_score - 2],
        ])
Example #2
import unittest

import torch

from pykeen.datasets import Nations
from pykeen.evaluation import RankBasedEvaluator
from pykeen.models import FixedModel  # import path may vary with the PyKEEN version


class TestEvaluationFiltering(unittest.TestCase):
    """Tests for the correct filtering of positive triples during evaluation."""

    def setUp(self):
        """Prepare for testing the evaluation filtering."""
        self.evaluator = RankBasedEvaluator(
            filtered=True, automatic_memory_optimization=False)
        self.triples_factory = Nations().training
        self.model = FixedModel(triples_factory=self.triples_factory)

        # The FixedModel gives the highest score to the highest entity id
        max_score = self.triples_factory.num_entities - 1

        # The test triples are created to yield the third highest score on both head and tail prediction
        self.test_triples = torch.tensor([[max_score - 2, 0, max_score - 2]])

        # Write new mapped triples to the model, since the model's triples will be used to filter
        # These triples are created to yield the highest score on both head and tail prediction for the
        # test triple at hand
        self.training_triples = torch.tensor([
            [max_score - 2, 0, max_score],
            [max_score, 0, max_score - 2],
        ])

        # The validation triples are created to yield the second highest score on both head and tail prediction for the
        # test triple at hand
        self.validation_triples = torch.tensor([
            [max_score - 2, 0, max_score - 1],
            [max_score - 1, 0, max_score - 2],
        ])

    def test_evaluation_filtering_without_validation_triples(self):
        """Test if the evaluator's triple filtering works as expected."""
        eval_results = self.evaluator.evaluate(
            model=self.model,
            mapped_triples=self.test_triples,
            additional_filter_triples=self.training_triples,
            batch_size=1,
            use_tqdm=False,
        )
        assert eval_results.get_metric(name="mr") == 2, "The mean rank should equal 2"

    def test_evaluation_filtering_with_validation_triples(self):
        """Test if the evaluator's triple filtering works as expected when including additional filter triples."""
        eval_results = self.evaluator.evaluate(
            model=self.model,
            mapped_triples=self.test_triples,
            additional_filter_triples=[
                self.training_triples,
                self.validation_triples,
            ],
            batch_size=1,
            use_tqdm=False,
        )
        assert eval_results.get_metric(name="mr") == 1, "The mean rank should equal 1"
Example #3
 def setUp(self) -> None:
     """Prepare test instance."""
     evaluator = RankBasedEvaluator()
     evaluator.num_entities = self.num_entities
     evaluator.ranks = {
         (side, rank_type): [
             random.random()
             for _ in range(self.num_triples * (2 if side == SIDE_BOTH else 1))
         ]
         for side, rank_type in itertools.product(
             SIDES, RANK_TYPES | {RANK_EXPECTED_REALISTIC}
         )
     }
     self.instance = evaluator.finalize()
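
 def test_metrics_present(self) -> None:
     """Sketch (not in the original): the finalized results should expose rank-based metrics."""
     # Assumes PyKEEN's `get_metric` API as used in the earlier examples; the exact
     # metric names accepted may vary with the PyKEEN version.
     for name in ("mean_rank", "mean_reciprocal_rank"):
         assert isinstance(self.instance.get_metric(name), float)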
Example #4
 def test_early_stopping(self):
     """Tests early stopping."""
     nations = Nations()
     model: Model = TransE(triples_factory=nations.training)
     # Disable automatic memory optimization during testing
     evaluator = RankBasedEvaluator(automatic_memory_optimization=False)
     stopper = EarlyStopper(
         model=model,
         evaluator=evaluator,
         training_triples_factory=nations.training,
         evaluation_triples_factory=nations.validation,
         patience=self.patience,
         relative_delta=self.relative_delta,
         metric='mean_rank',
     )
     training_loop = SLCWATrainingLoop(
         model=model,
         triples_factory=nations.training,
         optimizer=Adam(params=model.get_grad_params()),
         automatic_memory_optimization=False,
     )
     losses = training_loop.train(
         triples_factory=nations.training,
         num_epochs=self.max_num_epochs,
         batch_size=self.batch_size,
         stopper=stopper,
         use_tqdm=False,
     )
     self.assertEqual(stopper.number_results,
                      len(losses) // stopper.frequency)
     self.assertEqual(self.stop_epoch,
                      len(losses),
                      msg='Did not stop early like it should have')
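
     # Additional sanity check (a sketch, not in the original test): early stopping should
     # have halted training before the configured maximum number of epochs was reached.
     self.assertLess(len(losses), self.max_num_epochs)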
Example #5
import numpy as np
import torch
from torch.optim import Adam

from pykeen.evaluation import RankBasedEvaluator
from pykeen.models import RGCN
from pykeen.training import SLCWATrainingLoop

# NOTE: `training` and `testing` below are assumed to be PyKEEN TriplesFactory instances
# (e.g. the training and testing splits of a dataset); their definition is not part of
# this snippet.

# The beginning of this array literal was truncated in the original snippet; the variable
# name is inferred from its use in `entity_initializer` below.
np_array = np.array([
    # ... (leading rows truncated)
    [1, 2, 1], [1, 2, 1], [1, 2, 1], [1, 2, 1], [1, 2, 1],
    [1, 2, 1], [1, 2, 1], [1, 2, 1], [1, 2, 1], [1, 2, 1],
    [1, 2, 1], [1, 2, 1], [1, 2, 1], [1, 2, 1], [1, 2, 1],
    [1, 2, 1],
])

# Initializer that ignores its input tensor and always returns the fixed feature matrix
entity_initializer = lambda t: torch.as_tensor(np_array, dtype=torch.float32)

model = RGCN(triples_factory=training,
             entity_representations=entity_initializer)
optimizer = Adam(params=model.get_grad_params())
training_loop = SLCWATrainingLoop(
    model=model,
    triples_factory=training,
    optimizer=optimizer,
)

# train
training_loop.train(
    triples_factory=training,
    num_epochs=500,
    batch_size=256,
)

# evaluate
evaluator = RankBasedEvaluator(ks=[50])
mapped_triples = testing.mapped_triples

results = evaluator.evaluate(
    model=model,
    mapped_triples=mapped_triples,
    batch_size=1024,
)
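
# Follow-up sketch (not part of the original snippet): read individual metrics from the
# results object. "hits@50" corresponds to the `ks=[50]` passed to the evaluator above;
# the exact metric names accepted depend on the PyKEEN version in use.
print(results.get_metric("mean_rank"))
print(results.get_metric("hits@50"))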