Example #1
def test_paired_pose_dataset():
    plain_size = 20
    some_size = 10
    # distorter_config is assumed to be defined at module level in the original test file.
    distorter = distorters.SyntheticDistorter(distorter_config)

    dataset_plain = datasets.PairedPoseDataset('unit_test/dummy42', distorter,
                                               False, plain_size, 'cpu')
    dataset_dict = datasets.PairedPoseDataset('unit_test/dummy42_dict',
                                              distorter,
                                              True,
                                              device='cuda:0')

    some = dataset_plain[:some_size]
    single_sub1 = dataset_dict[0]
    dataset_dict.select_subset('sub2')
    single_sub2 = dataset_dict[1]

    assert type(some) is datasets.PoseCorrectionBatch
    assert len(dataset_plain) == plain_size
    # dataset_size is assumed to be defined at module level in the original test file.
    assert len(dataset_dict) == dataset_size
    assert some.poses.shape == (some_size, 21, 3)
    assert some.poses.dtype == torch.float32
    assert some.poses.is_same_size(some.labels)
    assert some.device == torch.device('cpu')
    assert dataset_dict.device == torch.device('cuda:0')
    assert single_sub1.device == torch.device('cuda:0')
    assert single_sub1.poses.shape == (1, 21, 3)
    assert not torch.allclose(single_sub1.poses, single_sub2.poses)
    assert not torch.allclose(single_sub1.poses, single_sub1.labels)
    assert dataset_dict.get_subset_names() == ['sub1', 'sub2']
    assert dataset_dict.has_subsets
Example #2
def test_knn_predefined_distorter():
    config = {
        'source_name': os.path.join('unit_test', 'dummy42'),
        'knn_name': os.path.join('unit_test', 'dummy42'),
        'strength_alpha': -4.0,
        'strength_loc': 0.85,
        'strength_scale': 0.01,
        'max_k': 2,
        'device': 'cuda:0',
        'stds': [3.0, 5.0, 10.0],
        'layer_probs': [0.7, 0.25, 0.05],
        'layer_radii': [0.0, 6.0, 7.0],
        'confusion_prob': 0.02
    }
    distorter = distorters.KNNPredefinedDistorter(config)

    dataset_path = os.path.join('unit_test', 'dummy42')
    distort_dataset = datasets.PairedPoseDataset(dataset_path,
                                                 distorter,
                                                 False,
                                                 device='cuda:0')
    no_distort_dataset = datasets.PairedPoseDataset(dataset_path,
                                                    distorters.NoDistorter(),
                                                    True,
                                                    device='cuda:0')

    distort_batch = distort_dataset[:]
    no_distort_batch = no_distort_dataset[:]

    # The distorted poses must differ from the undistorted ones, while the labels stay identical.
    assert not torch.allclose(distort_batch.poses, no_distort_batch.poses)
    assert torch.equal(distort_batch.labels, no_distort_batch.labels)
Example #3
    def run(self):
        distorter = self.base_hyperparams['distorter'](self.base_hyperparams['distorter_args'])
        # Build the training and validation sets, using the normalized dataset variant when a
        # normalizer is configured. The validation set is never distorted.
        if self.config['experiment']['normalizer'] is not None:
            train_data = datasets.NormalizedPairedPoseDataset(
                self.config['experiment']['train_set'],
                distorter,
                self.config['experiment']['normalizer'],
                self.config['experiment']['use_preset'],
                self.config['experiment']['train_set_size'],
                self.config['experiment']['target_device'])
            val_data = datasets.NormalizedPairedPoseDataset(
                self.config['experiment']['val_set'],
                distorters.NoDistorter(),
                self.config['experiment']['normalizer'],
                True,
                self.config['experiment']['val_set_size'],
                self.config['experiment']['target_device'])
        else:
            train_data = datasets.PairedPoseDataset(self.config['experiment']['train_set'],
                                                    distorter,
                                                    self.config['experiment']['use_preset'],
                                                    self.config['experiment']['train_set_size'],
                                                    self.config['experiment']['target_device'])
            val_data = datasets.PairedPoseDataset(self.config['experiment']['val_set'],
                                                  distorters.NoDistorter(),
                                                  True,
                                                  self.config['experiment']['val_set_size'],
                                                  self.config['experiment']['target_device'])

        # Record the actual dataset sizes back into the config before it is saved.
        self.config['experiment']['train_set_size'] = len(train_data)
        self.config['experiment']['val_set_size'] = len(val_data)

        # Create the experiment output directory and persist the full configuration.
        experiment_dir = os.path.join('results', self.config['experiment']['name'])
        os.mkdir(experiment_dir)
        torch.save(self.config, os.path.join(experiment_dir, 'config.pt'))

        solver = Solver(self.config['solver'], train_data, val_data)
        combinations_of_configs = self._generate_combinations()

        # Run one training session per hyperparameter combination.
        for i, hyperparams in enumerate(combinations_of_configs):
            print('\n\n' + '#' * 100)
            print('START OF SESSION {}/{}'.format(i + 1, len(combinations_of_configs)))

            results_dir = os.path.join(experiment_dir, str(i))
            os.mkdir(results_dir)
            torch.save(hyperparams, os.path.join(results_dir, 'params.pt'))

            # Each session gets its own distorter instance; it only affects the training data.
            distorter = hyperparams['distorter'](hyperparams['distorter_args'])
            train_data.distorter = distorter

            for j in range(self.config['experiment']['n_repetitions']):
                print('\nRepetition {}/{}  ({}):'.format(j + 1,
                                                         self.config['experiment']['n_repetitions'],
                                                         self.config['experiment']['name']))
                print('*' * 50)

                if self.config['experiment']['deterministic_mode']:
                    torch.manual_seed(0)

                model = self._create_model_and_normalizer(hyperparams)

                log, eval_results, weights, example_predictions = solver.train(model, hyperparams)

                repetition_dir = os.path.join(results_dir, str(j))
                os.mkdir(repetition_dir)

                torch.save(log, os.path.join(repetition_dir, 'log.pt'))
                torch.save(eval_results, os.path.join(repetition_dir, 'eval.pt'))
                torch.save(weights, os.path.join(repetition_dir, 'weights.pt'))
                torch.save(example_predictions, os.path.join(repetition_dir, 'examples.pt'))

        print('\nExperiment >> {} << finished.\n'.format(self.config['experiment']['name']))
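The directory layout written by run() makes it straightforward to load results back for analysis. A minimal sketch (not part of the original class), assuming a hypothetical experiment named 'my_experiment' and reading hyperparameter combination 0, repetition 0:

import os
import torch

# 'my_experiment' is a hypothetical name; substitute config['experiment']['name'].
experiment_dir = os.path.join('results', 'my_experiment')
repetition_dir = os.path.join(experiment_dir, '0', '0')  # combination 0, repetition 0

config = torch.load(os.path.join(experiment_dir, 'config.pt'))
hyperparams = torch.load(os.path.join(experiment_dir, '0', 'params.pt'))
log = torch.load(os.path.join(repetition_dir, 'log.pt'))
eval_results = torch.load(os.path.join(repetition_dir, 'eval.pt'))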
Example #4
"""
For a given dataset (with pair of pose and label files available) extract the distortions and save
them to a file.
No normalization is performed.
"""

import os
import torch

from data_utils import datasets

# Config
dataset_name = 'HANDS17_DPREN_SubjClust_train'
########################################################################

dataset = datasets.PairedPoseDataset(dataset_name, use_preset=True)
distortion_vectors = dataset.poses - dataset.labels

torch.save(distortion_vectors,
           os.path.join('data', 'distortions', '{}.pt'.format(dataset_name)))
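As a quick sanity check (not part of the original script), the saved tensor can be reloaded and summarized. This assumes the distortion vectors keep the (N, 21, 3) pose layout seen in the tests above:

# Reload the distortion vectors saved above and print a simple summary statistic.
distortions = torch.load(os.path.join('data', 'distortions', '{}.pt'.format(dataset_name)))
mean_distortion_distance = torch.norm(distortions, dim=2).mean()  # mean per-joint offset length
print('Mean distortion distance: {:.4f}'.format(mean_distortion_distance.item()))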
Example #5
"""
Quick verification whether a newly generated dataset (splitted) looks "reasonable".
"""

import torch

from data_utils import datasets

# Config
dataset_name = 'HANDS17_DPREN_SubjClust'
whole_dataset_name = 'HANDS17_DPREN_all'
########################################################################

train_set = datasets.PairedPoseDataset(dataset_name + '_train', use_preset=True)
val_set = datasets.PairedPoseDataset(dataset_name + '_val', use_preset=True)
whole_set = datasets.PairedPoseDataset(whole_dataset_name, use_preset=True)

train_batch = train_set[:]
val_batch = val_set[:]

train_distance_error = torch.norm(train_batch.poses - train_batch.labels, dim=2).mean()
val_distance_error = torch.norm(val_batch.poses - val_batch.labels, dim=2).mean()

assert len(train_set) + len(val_set) == len(whole_set)
assert train_distance_error > 1e-3
assert val_distance_error > 1e-3
assert train_distance_error != val_distance_error
assert not torch.allclose(train_set[0].labels, val_set[0].labels)

print('Looks good!')
Example #6
print(
    'Starting TSNE analysis with: \n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}'
    .format(dataset_names, eval_spaces, subset_name, error_name, perplexities,
            cont, max_iter, cheat_metric))

########################################################################

for dataset_name in dataset_names:
    for eval_space in eval_spaces:
        for perplexity in perplexities:
            if eval_space == 'normalized':
                dataset = datasets.NormalizedPairedPoseDataset(
                    dataset_name, distorters.NoDistorter(), True)
            else:
                dataset = datasets.PairedPoseDataset(dataset_name,
                                                     distorters.NoDistorter(),
                                                     True)
            dataset.select_subset(subset_name)

            all_data = dataset[:]

            # Select which error representation to embed with t-SNE.
            if error_name == 'distance':
                data = errors.distance_error(all_data.poses, all_data.labels)
            elif error_name == 'bone_length':
                data = errors.bone_length_error(all_data.poses,
                                                all_data.labels)
            elif error_name == 'dist_bone_cat':
                distance_errors = errors.distance_error(
                    all_data.poses, all_data.labels)
                bone_length_errors = errors.bone_length_error(
                    all_data.poses, all_data.labels)
Example #7
"""
Artificially extract a smaller training set from a big one for experiments that simulate a lack of
data.
"""

import os
import torch

from data_utils import datasets

# Config
dataset_name = 'HANDS17_DPREN_SubjClust'
small_set_size_percent = 10
########################################################################

dataset = datasets.PairedPoseDataset(dataset_name + '_train', use_preset=True)

small_set_size = int(small_set_size_percent / 100 * len(dataset))
subset_batch = dataset[:small_set_size]

output_name = '{}_small{}_train'.format(dataset_name, small_set_size_percent)
torch.save(subset_batch.poses.clone(),
           os.path.join('data', output_name + '_poses.pt'))
torch.save(subset_batch.labels.clone(),
           os.path.join('data', output_name + '_labels.pt'))
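If, as the other examples suggest, PairedPoseDataset resolves a dataset name to data/<name>_poses.pt and data/<name>_labels.pt, the extracted subset can then be loaded like any other preset set. A hedged sketch of that assumption:

# Assumes the loader looks up 'data/{name}_poses.pt' and 'data/{name}_labels.pt' by name.
small_set = datasets.PairedPoseDataset(output_name, use_preset=True)
assert len(small_set) == small_set_size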