# Example 1
 def evaluate_dataset(self, precompute_prefix=None, mode='mean'):
     """Compute (or load) per-sample evaluation errors for ``self.dataset``.

     Args:
         precompute_prefix: If given, errors are loaded from the file
             derived from this prefix instead of being recomputed.
         mode: Aggregation mode forwarded to ``Evaluator.to_dataset``.

     Side effects:
         Sets ``self.dataset_errors`` to a dict mapping space name
         ('default', and 'original' for normalized paired datasets) to
         the corresponding error structure.
     """
     if precompute_prefix is not None:
         # Reuse previously computed errors instead of re-evaluating.
         path = self._construct_error_file_path(precompute_prefix)
         self.dataset_errors = torch.load(path)
     else:
         self.dataset_errors = {
             'default':
             Evaluator.to_dataset(self.data_loader,
                                  space='default',
                                  mode=mode)
         }
         # Use isinstance rather than an exact type comparison so that
         # subclasses of NormalizedPairedPoseDataset also get
         # original-space errors computed.
         if isinstance(self.dataset, datasets.NormalizedPairedPoseDataset):
             self.dataset_errors['original'] = Evaluator.to_dataset(
                 self.data_loader, space='original', mode=mode)
def test_to_dataset():
    """Identity-distorted datasets must evaluate to all-zero metrics,
    both in normalized and original space, with and without subsets."""
    distorter = distorters.NoDistorter()
    ds_no_subs = datasets.NormalizedPairedPoseDataset('unit_test/dummy42',
                                                      distorter,
                                                      norm.NoNorm,
                                                      False,
                                                      device='cuda:0')
    ds_subs = datasets.NormalizedPairedPoseDataset('unit_test/ident42',
                                                   distorter,
                                                   norm.NoNorm,
                                                   True,
                                                   device='cuda:0')
    loader_no_subs = datasets.DataLoader(ds_no_subs, 6)
    loader_subs = datasets.DataLoader(ds_subs, 6)

    # Every metric is expected to be exactly zero for all 42 samples.
    n_samples = 42
    expected_zeros = {
        key: torch.zeros(n_samples, device='cuda:0')
        for key in ('coord_diff', 'distance', 'bone_length', 'proportion')
    }

    norm_no_subs = Evaluator.to_dataset(loader_no_subs)
    orig_no_subs = Evaluator.to_dataset(loader_no_subs, space='original')
    norm_subs = Evaluator.to_dataset(loader_subs)
    orig_subs = Evaluator.to_dataset(loader_subs, space='original')

    for metric_name in Evaluator.metric_names:
        target = expected_zeros[metric_name]
        # Subset datasets report per-subset results.
        for subset_name in ['sub1', 'sub2']:
            for result in (norm_subs, orig_subs):
                assert torch.allclose(result[subset_name][metric_name],
                                      target,
                                      atol=1e-5)
        # Datasets without subsets report under the 'DEFAULT' key.
        for result in (norm_no_subs, orig_no_subs):
            assert torch.allclose(result['DEFAULT'][metric_name],
                                  target,
                                  atol=1e-5)
"""
Compute the mean evaluation metrics on a dataset (before applying any corrections).
"""

import os
import torch

from data_utils import datasets
from evaluation.evaluator import Evaluator

# Config
dataset_name = 'HANDS17_DPREN_SubjClust_val'
########################################################################

dataset = datasets.PairedPoseDataset(dataset_name, use_preset=True)
# One very large batch so the whole dataset is evaluated in a single pass.
data_loader = datasets.DataLoader(dataset, 100000)
# Keyword argument for consistency with the other Evaluator.to_dataset
# call sites in the project.
results = Evaluator.means_per_metric(
    Evaluator.to_dataset(data_loader, space='default'))

# Ensure the output directory exists; torch.save does not create it.
output_dir = os.path.join('results', 'datasets')
os.makedirs(output_dir, exist_ok=True)
torch.save(results, os.path.join(output_dir, dataset_name + '.pt'))