Example no. 1
0
    def _intermediate_eval(self, session, batch):
        """Run a mid-training evaluation pass.

        Computes per-metric mean results for the given training batch and
        for the whole validation set, moving both result dicts to CPU.

        Args:
            session: training session; provides ``params['eval_space']``
                and ``test_model``.
            batch: pose batch; when evaluating in the 'original' space its
                ``original_poses`` attribute is (re)filled by denormalizing
                ``batch.poses``.

        Returns:
            Tuple ``(train_mean_results, val_mean_results)``.
        """
        eval_space = session.params['eval_space']
        # Evaluating in the original space requires denormalized poses.
        if eval_space == 'original':
            batch.original_poses = self.normalizer.denormalize(
                batch.poses, batch.normalization_params)

        batch_results = {'DEFAULT': Evaluator.to_batch(batch, eval_space)}
        train_means = Evaluator.means_per_metric(batch_results)
        Evaluator.results_to_cpu(train_means)

        val_means = session.test_model(self.val_loader)
        Evaluator.results_to_cpu(val_means)

        return train_means, val_means
Example no. 2
0
def test_to_batch():
    """Check Evaluator.to_batch against analytically known metric values.

    Labels are the poses shifted by a constant (1, 1, 1) offset, so each
    coordinate differs by exactly 1, each joint distance is sqrt(3), and
    bone lengths/proportions are unchanged (diff of 0). Both the default
    (normalized) space and the 'original' space must reproduce these.
    """
    n = 42
    poses = torch.rand(n, 21, 3)
    labels = poses + torch.ones(n, 21, 3)
    batch = datasets.PoseCorrectionBatch(poses, labels, poses, labels)

    expected = {
        'coord_diff': torch.ones(n),
        'distance': torch.sqrt(3.0 * torch.ones(n)),
        'bone_length': torch.zeros(n),
        'proportion': torch.zeros(n),
    }

    # Evaluate in both spaces; the constant-offset construction makes the
    # expected values identical regardless of the evaluation space.
    for results in (Evaluator.to_batch(batch),
                    Evaluator.to_batch(batch, space='original')):
        for metric_name in Evaluator.metric_names:
            assert torch.allclose(results[metric_name],
                                  expected[metric_name],
                                  atol=1e-6)