Example #1: evaluating a model in the normalized ('default') space and, for a NormalizedPairedPoseDataset, in the original space as well.
    def evaluate_model(self, mode='mean'):
        # Always evaluate in the normalized ('default') space.
        self.errors = {
            'default': Evaluator.to_model(self.data_loader,
                                          self.model,
                                          space='default',
                                          mode=mode)
        }
        # Normalized paired-pose datasets can additionally be evaluated in
        # the original (un-normalized) coordinate space.
        if isinstance(self.dataset, datasets.NormalizedPairedPoseDataset):
            self.errors['original'] = Evaluator.to_model(self.data_loader,
                                                         self.model,
                                                         space='original',
                                                         mode=mode)
Example #2: a _full_evaluation helper that always evaluates in the 'default' space, optionally adds the 'original' space, and moves every result tensor to the CPU before returning.
    def _full_evaluation(self, model, eval_space):
        # Evaluation in the normalized ('default') space always happens.
        default_results = Evaluator.means_per_metric(
            Evaluator.to_model(self.val_loader, model, 'default'))
        eval_results = {'default': default_results}

        # Optionally evaluate in the original (un-normalized) space as well.
        if eval_space == 'original':
            original_results = Evaluator.means_per_metric(
                Evaluator.to_model(self.val_loader, model, 'original'))
            eval_results['original'] = original_results

        # Move every result tensor to the CPU before handing the dict back.
        for eval_space_results in eval_results.values():
            Evaluator.results_to_cpu(eval_space_results)

        return eval_results
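
The dict returned above is keyed by evaluation space ('default' and, when requested, 'original'). A minimal usage sketch, assuming the per-space results behave like a mapping from metric name to a CPU value; the trainer variable and the print format are illustrative only, not part of the source:

    # Hypothetical caller: log one line per (space, metric) pair.
    # Assumes each per-space result is a metric-name -> value mapping.
    eval_results = trainer._full_evaluation(model, eval_space='original')
    for space, per_metric in eval_results.items():
        for metric_name, value in per_metric.items():
            print(f'[{space}] {metric_name}: {value}')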
Example #3: a unit test asserting that Evaluator.to_model reports zero error for every metric when a dummy model is evaluated on undistorted, un-normalized data, both with and without dataset subsets.
import torch

# Note: datasets, distorters, norm, helpers and Evaluator are project-local
# modules/classes; their import paths are assumed to match the surrounding
# test suite and are therefore not spelled out here.


def test_to_model():
    # With no distortion (NoDistorter) and no normalization (NoNorm), the
    # dummy model is expected to incur zero error on every metric.
    distorter = distorters.NoDistorter()
    model = helpers.DummyModel()
    dataset_no_subs = datasets.NormalizedPairedPoseDataset('unit_test/dummy42',
                                                           distorter,
                                                           norm.NoNorm,
                                                           False,
                                                           device='cuda:0')
    dataset_subs = datasets.NormalizedPairedPoseDataset('unit_test/ident42',
                                                        distorter,
                                                        norm.NoNorm,
                                                        True,
                                                        device='cuda:0')
    data_loader_no_subs = datasets.DataLoader(dataset_no_subs, 6)
    data_loader_subs = datasets.DataLoader(dataset_subs, 6)

    # Expected per-sample errors: all zeros for every metric.
    batch_size = 42
    true_results = {
        'coord_diff': torch.zeros(batch_size, device='cuda:0'),
        'distance': torch.zeros(batch_size, device='cuda:0'),
        'bone_length': torch.zeros(batch_size, device='cuda:0'),
        'proportion': torch.zeros(batch_size, device='cuda:0'),
    }

    # Evaluate in both the normalized (default) and original spaces,
    # with and without dataset subsets.
    results_norm_no_subs = Evaluator.to_model(data_loader_no_subs, model)
    results_orig_no_subs = Evaluator.to_model(data_loader_no_subs,
                                              model,
                                              space='original')
    results_norm_subs = Evaluator.to_model(data_loader_subs, model)
    results_orig_subs = Evaluator.to_model(data_loader_subs,
                                           model,
                                           space='original')

    for metric_name in Evaluator.metric_names:
        for subset_name in ['sub1', 'sub2']:
            assert torch.allclose(results_norm_subs[subset_name][metric_name],
                                  true_results[metric_name],
                                  atol=1e-5)
            assert torch.allclose(results_orig_subs[subset_name][metric_name],
                                  true_results[metric_name],
                                  atol=1e-5)
        assert torch.allclose(results_norm_no_subs['DEFAULT'][metric_name],
                              true_results[metric_name],
                              atol=1e-5)
        assert torch.allclose(results_orig_no_subs['DEFAULT'][metric_name],
                              true_results[metric_name],
                              atol=1e-5)
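
Because the test pins everything to device='cuda:0', it can only pass on a machine with a GPU. A small, assumption-labelled guard that could sit above test_to_model to skip it gracefully under pytest when CUDA is unavailable (the marker name is hypothetical; the project may already handle device selection elsewhere):

    import pytest
    import torch

    # Hypothetical guard: skip the CUDA-only test when no GPU is present.
    requires_cuda = pytest.mark.skipif(not torch.cuda.is_available(),
                                       reason='test_to_model needs cuda:0')

Applying it is then just a matter of decorating the test with @requires_cuda.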