Example #1
def test_knn_predefined_distorter():
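    # Test-fixture configuration for the KNN-based distorter; the paths point
    # at the unit-test dummy dataset and the remaining values are distortion
    # hyperparameters used only for this test.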
    config = {
        'source_name': os.path.join('unit_test', 'dummy42'),
        'knn_name': os.path.join('unit_test', 'dummy42'),
        'strength_alpha': -4.0,
        'strength_loc': 0.85,
        'strength_scale': 0.01,
        'max_k': 2,
        'device': 'cuda:0',
        'stds': [3.0, 5.0, 10.0],
        'layer_probs': [0.7, 0.25, 0.05],
        'layer_radii': [0.0, 6.0, 7.0],
        'confusion_prob': 0.02
    }
    distorter = distorters.KNNPredefinedDistorter(config)

    distort_dataset = datasets.PairedPoseDataset(os.path.join(
        'unit_test', 'dummy42'),
                                                 distorter,
                                                 False,
                                                 device='cuda:0')
    no_distort_dataset = datasets.PairedPoseDataset(os.path.join(
        'unit_test', 'dummy42'),
                                                    distorters.NoDistorter(),
                                                    True,
                                                    device='cuda:0')

    distort_batch = distort_dataset[:]
    no_distort_batch = no_distort_dataset[:]

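    # The distorter must perturb the poses while leaving the labels untouched.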
    assert not torch.allclose(distort_batch.poses, no_distort_batch.poses)
    assert torch.equal(distort_batch.labels, no_distort_batch.labels)
Example #2
    def __init__(self,
                 name,
                 distorter=distorters.NoDistorter(),
                 use_preset=False,
                 num_samples=None,
                 device='cpu'):
        super().__init__(name + '_labels', device)

        self.distorter = distorter

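        # Labels are always loaded from '<name>_labels'; subset dictionaries
        # are only supported on the pose side.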
        label_data = self._load_from_disk(name + '_labels', device)
        if type(label_data) is dict:
            raise TypeError('Subsets for labels are not supported.')
        self.labels = self._shorten_and_to_device(label_data, num_samples,
                                                  device)

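        # With use_preset the poses come from '<name>_poses' (optionally split
        # into named subsets); otherwise the labels double as the input poses.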
        self.use_preset = use_preset
        if use_preset:
            pose_data = self._load_from_disk(name + '_poses', device)
            if type(pose_data) is dict:
                self.pose_subsets = self._shorten_and_to_device(
                    pose_data, num_samples, device)
                self.select_subset(self.get_subset_names()[0])
            else:
                self.poses = self._shorten_and_to_device(
                    pose_data, num_samples, device)
        else:
            self.poses = self.labels

        if len(self.poses) != len(self.labels):
            raise RuntimeError('Different number of poses and labels.')
Example #3
def test_data_loader():
    distorter = distorters.NoDistorter()
    dataset_plain = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42', distorter, norm.NoNorm, True, dataset_size,
        'cuda:0')
    dataset_dict = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42_dict', distorter, norm.NoNorm, True, dataset_size,
        'cuda:0')
    true_sum_sub1 = dataset_dict[:].poses.sum()
    dataset_dict.select_subset('sub2')
    true_sum_sub2 = dataset_dict[:].poses.sum()

    data_loader_plain = datasets.DataLoader(dataset_plain, 6)
    data_loader_dict = datasets.DataLoader(dataset_dict, 6)

    plain_batch = next(iter(data_loader_plain))
    subset_names_plain = data_loader_plain.get_subset_names()
    data_loader_plain.select_subset(subset_names_plain[0])

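    # Collect the batches of every subset of the dict-backed dataset and sum
    # their poses for comparison against the directly sliced dataset.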
    all_batches = {}
    sum_of_subsets = {}
    for subset_name in data_loader_dict.get_subset_names():
        data_loader_dict.select_subset(subset_name)
        all_batches[subset_name] = list(data_loader_dict)
        sum_of_subsets[subset_name] = sum(
            batch.poses.sum() for batch in all_batches[subset_name])

    assert type(plain_batch) is datasets.PoseCorrectionBatch
    assert subset_names_plain == ['DEFAULT']
    assert list(all_batches.keys()) == ['sub1', 'sub2']
    assert len(all_batches['sub1']) == 7
    assert type(all_batches['sub1'][0]) == datasets.PoseCorrectionBatch
    assert all_batches['sub1'][0].labels.shape == (6, 21, 3)
    assert torch.allclose(sum_of_subsets['sub1'], true_sum_sub1)
    assert torch.allclose(sum_of_subsets['sub2'], true_sum_sub2)
Example #4
def test_training_session():
    training_set = datasets.NormalizedPairedPoseDataset('unit_test/dummy42',
                                                        distorter,
                                                        norm.NoNorm,
                                                        False,
                                                        None,
                                                        device='cuda:0')
    validation_set = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42_dict',
        distorters.NoDistorter(),
        norm.NoNorm,
        True,
        None,
        device='cuda:0')
    training_batch = training_set[:]
    val_loader = datasets.DataLoader(validation_set, batch_size=6)

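    # One learning-rate scheduler step, one training step on the full batch,
    # then evaluation of the model on the validation loader.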
    training_session = TrainingSession(model, hyperparams, norm.NoNorm)
    training_session.schedule_learning_rate()
    loss, result = training_session.train_batch(training_batch)
    test_results = training_session.test_model(val_loader)

    assert loss.numel() == 1
    assert loss.device == torch.device('cpu')
    assert training_batch.poses.is_same_size(result.poses)
    assert list(test_results.keys()) == ['sub1', 'sub2']
    assert list(test_results['sub1'].keys()) == Evaluator.metric_names
    assert test_results['sub1']['distance'].numel() == 1
Example #5
    def __init__(self,
                 name,
                 distorter=distorters.NoDistorter(),
                 normalizer=norm.NoNorm,
                 use_preset=False,
                 num_samples=None,
                 device='cpu'):
        super().__init__(name, distorter, use_preset, num_samples, device)

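        # Batchify the flat pose/label tensors, then normalize them as a pair;
        # the resulting normalization parameters are stored alongside them.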
        poses = self._batchify(self.poses)
        labels = self._batchify(self.labels)
        self.normalizer = normalizer
        self.norm_poses, self.norm_labels, self.norm_params = normalizer.normalize_pair(
            poses, labels)
Example #6
def test_solver():
    training_set = datasets.NormalizedPairedPoseDataset('unit_test/dummy42',
                                                        distorter,
                                                        norm.NoNorm,
                                                        False,
                                                        None,
                                                        device='cuda:0')
    validation_set = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42_dict',
        distorters.NoDistorter(),
        norm.NoNorm,
        True,
        None,
        device='cuda:0')

    solver_params = {
        'log_frequency': 2,
        'log_loss': True,
        'log_grad': True,
        'verbose': False,
        'show_plots': False,
        'num_epochs': 5,
        'batch_size': 6,
        'interest_keys': [(None, 'loss_function'),
                          ('optimizer_args', 'weight_decay'),
                          ('optimizer_args', 'lr')],
        'val_example_indices': [0, 1],
        'val_example_subset': 'sub1'
    }

    solver = Solver(solver_params, training_set, validation_set)
    log, eval_results, weights, example_predictions = solver.train(
        model, hyperparams)

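    # Assuming the dummy set holds 42 samples (as its name suggests):
    # 42 / batch size 6 = 7 batches per epoch, so 5 epochs yield 35 logged
    # training losses.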
    assert len(log['train']['loss']) == 35
    assert len(log['val']['sub1'][Evaluator.metric_names[0]]) == 10
    assert example_predictions.shape == (10, 2, 21, 3)
    assert type(eval_results) is dict
    assert list(eval_results.keys()) == ['default', 'original']
    assert log['train']['loss'][0].device == torch.device('cpu')
    assert log['train']['distance'][0].device == torch.device('cpu')
    assert log['val']['sub1']['distance'][0].device == torch.device('cpu')
Example #7
def test_to_model():
    distorter = distorters.NoDistorter()
    model = helpers.DummyModel()
    dataset_no_subs = datasets.NormalizedPairedPoseDataset('unit_test/dummy42',
                                                           distorter,
                                                           norm.NoNorm,
                                                           False,
                                                           device='cuda:0')
    dataset_subs = datasets.NormalizedPairedPoseDataset('unit_test/ident42',
                                                        distorter,
                                                        norm.NoNorm,
                                                        True,
                                                        device='cuda:0')
    data_loader_no_subs = datasets.DataLoader(dataset_no_subs, 6)
    data_loader_subs = datasets.DataLoader(dataset_subs, 6)

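    # The dummy model is expected to produce zero error on every metric, so
    # the reference results are all-zero tensors.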
    batch_size = 42
    true_results = {
        'coord_diff': torch.zeros(batch_size, device='cuda:0'),
        'distance': torch.zeros(batch_size, device='cuda:0'),
        'bone_length': torch.zeros(batch_size, device='cuda:0'),
        'proportion': torch.zeros(batch_size, device='cuda:0'),
    }

    results_norm_no_subs = Evaluator.to_model(data_loader_no_subs, model)
    results_orig_no_subs = Evaluator.to_model(data_loader_no_subs,
                                              model,
                                              space='original')
    results_norm_subs = Evaluator.to_model(data_loader_subs, model)
    results_orig_subs = Evaluator.to_model(data_loader_subs,
                                           model,
                                           space='original')

    for metric_name in Evaluator.metric_names:
        for subset_name in ['sub1', 'sub2']:
            assert torch.allclose(results_norm_subs[subset_name][metric_name],
                                  true_results[metric_name],
                                  atol=1e-5)
            assert torch.allclose(results_orig_subs[subset_name][metric_name],
                                  true_results[metric_name],
                                  atol=1e-5)
        assert torch.allclose(results_norm_no_subs['DEFAULT'][metric_name],
                              true_results[metric_name],
                              atol=1e-5)
        assert torch.allclose(results_orig_no_subs['DEFAULT'][metric_name],
                              true_results[metric_name],
                              atol=1e-5)
Example #8
    def run(self):
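        # Build the training and validation datasets from the experiment
        # config, normalized if a normalizer is configured.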
        distorter = self.base_hyperparams['distorter'](self.base_hyperparams['distorter_args'])
        if self.config['experiment']['normalizer'] is not None:
            train_data = datasets.NormalizedPairedPoseDataset(
                self.config['experiment']['train_set'],
                distorter,
                self.config['experiment']['normalizer'],
                self.config['experiment']['use_preset'],
                self.config['experiment']['train_set_size'],
                self.config['experiment']['target_device'])
            val_data = datasets.NormalizedPairedPoseDataset(
                self.config['experiment']['val_set'],
                distorters.NoDistorter(),
                self.config['experiment']['normalizer'],
                True,
                self.config['experiment']['val_set_size'],
                self.config['experiment']['target_device'])
        else:
            train_data = datasets.PairedPoseDataset(self.config['experiment']['train_set'],
                                                    distorter,
                                                    self.config['experiment']['use_preset'],
                                                    self.config['experiment']['train_set_size'],
                                                    self.config['experiment']['target_device'])
            val_data = datasets.PairedPoseDataset(self.config['experiment']['val_set'],
                                                  distorters.NoDistorter(),
                                                  True,
                                                  self.config['experiment']['val_set_size'],
                                                  self.config['experiment']['target_device'])

        self.config['experiment']['train_set_size'] = len(train_data)
        self.config['experiment']['val_set_size'] = len(val_data)

        experiment_dir = os.path.join('results', self.config['experiment']['name'])
        os.mkdir(experiment_dir)
        torch.save(self.config, os.path.join(experiment_dir, 'config.pt'))

        solver = Solver(self.config['solver'], train_data, val_data)
        combinations_of_configs = self._generate_combinations()

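        # One training session per hyperparameter combination, each repeated
        # n_repetitions times; all artifacts are saved under
        # results/<experiment name>/<session index>/<repetition index>.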
        for i, hyperparams in enumerate(combinations_of_configs):
            print('\n\n' + '#' * 100)
            print('START OF SESSION {}/{}'.format(i + 1, len(combinations_of_configs)))

            results_dir = os.path.join(experiment_dir, str(i))
            os.mkdir(results_dir)
            torch.save(hyperparams, os.path.join(results_dir, 'params.pt'))

            distorter = hyperparams['distorter'](hyperparams['distorter_args'])
            train_data.distorter = distorter

            for j in range(self.config['experiment']['n_repetitions']):
                print('\nRepetition {}/{}  ({}):'.format(j + 1,
                                                         self.config['experiment']['n_repetitions'],
                                                         self.config['experiment']['name']))
                print('*' * 50)

                if self.config['experiment']['deterministic_mode']:
                    torch.manual_seed(0)

                model = self._create_model_and_normalizer(hyperparams)

                log, eval_results, weights, example_predictions = solver.train(model, hyperparams)

                repetition_dir = os.path.join(results_dir, str(j))
                os.mkdir(repetition_dir)

                torch.save(log, os.path.join(repetition_dir, 'log.pt'))
                torch.save(eval_results, os.path.join(repetition_dir, 'eval.pt'))
                torch.save(weights, os.path.join(repetition_dir, 'weights.pt'))
                torch.save(example_predictions, os.path.join(repetition_dir, 'examples.pt'))

        print('\nExperiment >> {} << finished.\n'.format(self.config['experiment']['name']))
Example #9
max_iter = 2500
cheat_metric = False

print(
    'Starting TSNE analysis with: \n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}'
    .format(dataset_names, eval_spaces, subset_name, error_name, perplexities,
            cont, max_iter, cheat_metric))

########################################################################

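# Grid over all requested datasets, evaluation spaces and perplexity values.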
for dataset_name in dataset_names:
    for eval_space in eval_spaces:
        for perplexity in perplexities:
            if eval_space == 'normalized':
                dataset = datasets.NormalizedPairedPoseDataset(
                    dataset_name, distorters.NoDistorter(), True)
            else:
                dataset = datasets.PairedPoseDataset(dataset_name,
                                                     distorters.NoDistorter(),
                                                     True)
            dataset.select_subset(subset_name)

            all_data = dataset[:]

            if error_name == 'distance':
                data = errors.distance_error(all_data.poses, all_data.labels)
            elif error_name == 'bone_length':
                data = errors.bone_length_error(all_data.poses,
                                                all_data.labels)
            elif error_name == 'dist_bone_cat':
                distance_errors = errors.distance_error(