Example #1
def test_data_loader():
    distorter = distorters.NoDistorter()
    dataset_plain = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42', distorter, norm.NoNorm, True, dataset_size,
        'cuda:0')
    dataset_dict = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42_dict', distorter, norm.NoNorm, True, dataset_size,
        'cuda:0')
    true_sum_sub1 = dataset_dict[:].poses.sum()
    dataset_dict.select_subset('sub2')
    true_sum_sub2 = dataset_dict[:].poses.sum()

    data_loader_plain = datasets.DataLoader(dataset_plain, 6)
    data_loader_dict = datasets.DataLoader(dataset_dict, 6)

    plain_batch = next(iter(data_loader_plain))
    subset_names_plain = data_loader_plain.get_subset_names()
    data_loader_plain.select_subset(subset_names_plain[0])

    all_batches = {}
    sum_of_subsets = {}
    for subset_name in data_loader_dict.get_subset_names():
        data_loader_dict.select_subset(subset_name)
        all_batches[subset_name] = list(data_loader_dict)
        sum_of_subsets[subset_name] = sum(
            batch.poses.sum() for batch in all_batches[subset_name])

    assert type(plain_batch) is datasets.PoseCorrectionBatch
    assert subset_names_plain == ['DEFAULT']
    assert list(all_batches.keys()) == ['sub1', 'sub2']
    assert len(all_batches['sub1']) == 7
    assert type(all_batches['sub1'][0]) == datasets.PoseCorrectionBatch
    assert all_batches['sub1'][0].labels.shape == (6, 21, 3)
    assert torch.allclose(sum_of_subsets['sub1'], true_sum_sub1)
    assert torch.allclose(sum_of_subsets['sub2'], true_sum_sub2)
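
The assertions above hinge on the dummy dataset holding 42 samples, so a batch size of 6 yields exactly 7 batches of 21-joint, 3-D poses whose sums add up to the full-dataset sum. Below is a minimal sketch of that same arithmetic with plain torch.utils.data; the project's datasets.DataLoader is assumed to batch each selected subset analogously, and the size 42 is inferred from the dataset name rather than stated in the snippet.

import torch
from torch.utils.data import DataLoader, TensorDataset

poses = torch.randn(42, 21, 3)                 # 42 samples, 21 joints, 3-D (assumed size)
loader = DataLoader(TensorDataset(poses), batch_size=6)

batches = [b for (b,) in loader]               # default collation yields one-tensor tuples
assert len(batches) == 7                       # 42 / 6 = 7 full batches
assert batches[0].shape == (6, 21, 3)          # matches the labels.shape assertion above
assert torch.allclose(sum(b.sum() for b in batches), poses.sum())
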
Example #2
def test_training_session():
    training_set = datasets.NormalizedPairedPoseDataset('unit_test/dummy42',
                                                        distorter,
                                                        norm.NoNorm,
                                                        False,
                                                        None,
                                                        device='cuda:0')
    validation_set = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42_dict',
        distorters.NoDistorter(),
        norm.NoNorm,
        True,
        None,
        device='cuda:0')
    training_batch = training_set[:]
    val_loader = datasets.DataLoader(validation_set, batch_size=6)

    training_session = TrainingSession(model, hyperparams, norm.NoNorm)
    training_session.schedule_learning_rate()
    loss, result = training_session.train_batch(training_batch)
    test_results = training_session.test_model(val_loader)

    assert loss.numel() == 1
    assert loss.device == torch.device('cpu')
    assert training_batch.poses.is_same_size(result.poses)
    assert list(test_results.keys()) == ['sub1', 'sub2']
    assert list(test_results['sub1'].keys()) == Evaluator.metric_names
    assert test_results['sub1']['distance'].numel() == 1
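
Here model, hyperparams and distorter are module-level fixtures the snippet does not show, and the real TrainingSession is not included either. As a rough, hypothetical analogue of what train_batch appears to do (forward pass, loss, backward, optimizer step, scalar loss returned on the CPU), using only plain PyTorch:

import torch

model = torch.nn.Linear(3, 3)                        # stand-in for the pose-correction model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

poses = torch.randn(6, 21, 3)                        # a batch shaped like the ones above
labels = torch.randn(6, 21, 3)

result = model(poses)                                # "corrected" poses
loss = torch.nn.functional.mse_loss(result, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()

loss = loss.detach().cpu()
assert loss.numel() == 1 and loss.device == torch.device('cpu')
assert result.shape == labels.shape
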
Example #3
def test_solver():
    training_set = datasets.NormalizedPairedPoseDataset('unit_test/dummy42',
                                                        distorter,
                                                        norm.NoNorm,
                                                        False,
                                                        None,
                                                        device='cuda:0')
    validation_set = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42_dict',
        distorters.NoDistorter(),
        norm.NoNorm,
        True,
        None,
        device='cuda:0')

    solver_params = {
        'log_frequency': 2,
        'log_loss': True,
        'log_grad': True,
        'verbose': False,
        'show_plots': False,
        'num_epochs': 5,
        'batch_size': 6,
        'interest_keys': [(None, 'loss_function'),
                          ('optimizer_args', 'weight_decay'),
                          ('optimizer_args', 'lr')],
        'val_example_indices': [0, 1],
        'val_example_subset': 'sub1'
    }

    solver = Solver(solver_params, training_set, validation_set)
    log, eval_results, weights, example_predictions = solver.train(
        model, hyperparams)

    assert len(log['train']['loss']) == 35
    assert len(log['val']['sub1'][Evaluator.metric_names[0]]) == 10
    assert example_predictions.shape == (10, 2, 21, 3)
    assert type(eval_results) is dict
    assert list(eval_results.keys()) == ['default', 'original']
    assert log['train']['loss'][0].device == torch.device('cpu')
    assert log['train']['distance'][0].device == torch.device('cpu')
    assert log['val']['sub1']['distance'][0].device == torch.device('cpu')
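
The expected log lengths follow from the assumed 42-sample training set: a batch size of 6 gives 7 batches per epoch, so 5 epochs produce 35 training-loss entries, and log_frequency 2 appears to mean two validation logs per epoch, i.e. 10 points in total; with 2 val_example_indices and 21 joints in 3-D that also matches the (10, 2, 21, 3) prediction shape. A quick check of that arithmetic (the reading of log_frequency is an assumption):

dataset_size = 42            # assumed size of 'unit_test/dummy42'
batch_size = 6
num_epochs = 5
log_frequency = 2            # read here as validation logs per epoch (assumption)
val_example_indices = [0, 1]

batches_per_epoch = dataset_size // batch_size                    # 7
assert num_epochs * batches_per_epoch == 35                       # training-loss entries
assert num_epochs * log_frequency == 10                           # validation points
assert (num_epochs * log_frequency, len(val_example_indices), 21, 3) == (10, 2, 21, 3)
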
Example #4
def test_to_model():
    distorter = distorters.NoDistorter()
    model = helpers.DummyModel()
    dataset_no_subs = datasets.NormalizedPairedPoseDataset('unit_test/dummy42',
                                                           distorter,
                                                           norm.NoNorm,
                                                           False,
                                                           device='cuda:0')
    dataset_subs = datasets.NormalizedPairedPoseDataset('unit_test/ident42',
                                                        distorter,
                                                        norm.NoNorm,
                                                        True,
                                                        device='cuda:0')
    data_loader_no_subs = datasets.DataLoader(dataset_no_subs, 6)
    data_loader_subs = datasets.DataLoader(dataset_subs, 6)

    batch_size = 42
    true_results = {
        'coord_diff': torch.zeros(batch_size, device='cuda:0'),
        'distance': torch.zeros(batch_size, device='cuda:0'),
        'bone_length': torch.zeros(batch_size, device='cuda:0'),
        'proportion': torch.zeros(batch_size, device='cuda:0'),
    }

    results_norm_no_subs = Evaluator.to_model(data_loader_no_subs, model)
    results_orig_no_subs = Evaluator.to_model(data_loader_no_subs,
                                              model,
                                              space='original')
    results_norm_subs = Evaluator.to_model(data_loader_subs, model)
    results_orig_subs = Evaluator.to_model(data_loader_subs,
                                           model,
                                           space='original')

    for metric_name in Evaluator.metric_names:
        for subset_name in ['sub1', 'sub2']:
            assert torch.allclose(results_norm_subs[subset_name][metric_name],
                                  true_results[metric_name],
                                  atol=1e-5)
            assert torch.allclose(results_orig_subs[subset_name][metric_name],
                                  true_results[metric_name],
                                  atol=1e-5)
        assert torch.allclose(results_norm_no_subs['DEFAULT'][metric_name],
                              true_results[metric_name],
                              atol=1e-5)
        assert torch.allclose(results_orig_no_subs['DEFAULT'][metric_name],
                              true_results[metric_name],
                              atol=1e-5)
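
helpers.DummyModel is not shown, but since every metric is compared against zeros, it presumably returns its input unchanged: with NoDistorter the input poses already equal the labels, so an identity mapping yields zero coordinate, distance, bone-length and proportion errors. A hypothetical stand-in:

import torch

class IdentityPoseModel(torch.nn.Module):
    """Hypothetical stand-in for helpers.DummyModel: returns the poses unchanged."""

    def forward(self, poses):
        return poses

model = IdentityPoseModel()
poses = torch.randn(42, 21, 3)
assert torch.allclose(model(poses), poses)     # zero error against identical labels
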
Example #5
def test_normalized_paired_pose_dataset():
    plain_size = 20
    some_size = 10
    distorter = distorters.SyntheticDistorter(distorter_config)

    dataset_plain = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42', distorter, norm.NoNorm, False, plain_size, 'cpu')
    dataset_dict = datasets.NormalizedPairedPoseDataset(
        'unit_test/dummy42_dict',
        distorter,
        norm.NoNorm,
        True,
        device='cuda:0')

    some = dataset_plain[:some_size]
    single_sub1 = dataset_dict[0]
    dataset_dict.select_subset('sub2')
    single_sub2 = dataset_dict[1]

    assert type(some) is datasets.PoseCorrectionBatch
    assert len(dataset_plain) == plain_size
    assert len(dataset_dict) == dataset_size
    assert some.poses.shape == (some_size, 21, 3)
    assert some.poses.dtype == torch.float32
    assert some.poses.is_same_size(some.labels)
    assert some.poses.is_same_size(some.original_poses)
    assert some.poses.is_same_size(some.original_labels)
    assert some.normalization_params is not None
    assert some.device == torch.device('cpu')
    assert dataset_dict.device == torch.device('cuda:0')
    assert single_sub1.device == torch.device('cuda:0')
    assert single_sub1.poses.shape == (1, 21, 3)
    assert not torch.allclose(single_sub1.poses, single_sub2.poses)
    assert not torch.allclose(single_sub1.poses, single_sub1.labels)
    assert dataset_dict.get_subset_names() == ['sub1', 'sub2']
    assert dataset_dict.has_subsets
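
distorter_config is another module-level fixture not shown here. The final assertions only require that poses and labels differ, for example because the synthetic distorter perturbs the ground-truth poses; a hypothetical analogue of such a distortion using plain additive noise (the real SyntheticDistorter and its configuration may work differently):

import torch

def synthetic_distort(labels: torch.Tensor, noise_std: float = 0.01) -> torch.Tensor:
    """Hypothetical analogue of a synthetic distorter: add Gaussian noise to the labels."""
    return labels + noise_std * torch.randn_like(labels)

labels = torch.randn(1, 21, 3)
poses = synthetic_distort(labels)
assert not torch.allclose(poses, labels)       # mirrors the poses-vs-labels assertion above
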
Example #6
    def run(self):
        distorter = self.base_hyperparams['distorter'](self.base_hyperparams['distorter_args'])
        if self.config['experiment']['normalizer'] is not None:
            train_data = datasets.NormalizedPairedPoseDataset(
                self.config['experiment']['train_set'],
                distorter,
                self.config['experiment']['normalizer'],
                self.config['experiment']['use_preset'],
                self.config['experiment']['train_set_size'],
                self.config['experiment']['target_device'])
            val_data = datasets.NormalizedPairedPoseDataset(
                self.config['experiment']['val_set'],
                distorters.NoDistorter(),
                self.config['experiment']['normalizer'],
                True,
                self.config['experiment']['val_set_size'],
                self.config['experiment']['target_device'])
        else:
            train_data = datasets.PairedPoseDataset(self.config['experiment']['train_set'],
                                                    distorter,
                                                    self.config['experiment']['use_preset'],
                                                    self.config['experiment']['train_set_size'],
                                                    self.config['experiment']['target_device'])
            val_data = datasets.PairedPoseDataset(self.config['experiment']['val_set'],
                                                  distorters.NoDistorter(),
                                                  True,
                                                  self.config['experiment']['val_set_size'],
                                                  self.config['experiment']['target_device'])

        self.config['experiment']['train_set_size'] = len(train_data)
        self.config['experiment']['val_set_size'] = len(val_data)

        experiment_dir = os.path.join('results', self.config['experiment']['name'])
        os.mkdir(experiment_dir)
        torch.save(self.config, os.path.join(experiment_dir, 'config.pt'))

        solver = Solver(self.config['solver'], train_data, val_data)
        combinations_of_configs = self._generate_combinations()

        for i, hyperparams in enumerate(combinations_of_configs):
            print('\n\n' + '#' * 100)
            print('START OF SESSION {}/{}'.format(i + 1, len(combinations_of_configs)))

            results_dir = os.path.join(experiment_dir, str(i))
            os.mkdir(results_dir)
            torch.save(hyperparams, os.path.join(results_dir, 'params.pt'))

            distorter = hyperparams['distorter'](hyperparams['distorter_args'])
            train_data.distorter = distorter

            for j in range(self.config['experiment']['n_repetitions']):
                print('\nRepetition {}/{}  ({}):'.format(j + 1,
                                                         self.config['experiment']['n_repetitions'],
                                                         self.config['experiment']['name']))
                print('*' * 50)

                if self.config['experiment']['deterministic_mode']:
                    torch.manual_seed(0)

                model = self._create_model_and_normalizer(hyperparams)

                log, eval_results, weights, example_predictions = solver.train(model, hyperparams)

                repetition_dir = os.path.join(results_dir, str(j))
                os.mkdir(repetition_dir)

                torch.save(log, os.path.join(repetition_dir, 'log.pt'))
                torch.save(eval_results, os.path.join(repetition_dir, 'eval.pt'))
                torch.save(weights, os.path.join(repetition_dir, 'weights.pt'))
                torch.save(example_predictions, os.path.join(repetition_dir, 'examples.pt'))

        print('\nExperiment >> {} << finished.\n'.format(self.config['experiment']['name']))
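
The run() method reads a nested configuration dictionary. Every key below is referenced somewhere in the method, but the values are placeholders only; this is a hypothetical sketch of the expected shape, not the project's actual config.

config = {
    'experiment': {
        'name': 'my_experiment',              # placeholder
        'train_set': 'unit_test/dummy42',
        'val_set': 'unit_test/dummy42_dict',
        'normalizer': None,                   # None selects the plain PairedPoseDataset branch
        'use_preset': True,
        'train_set_size': None,               # overwritten with len(train_data) after loading
        'val_set_size': None,
        'target_device': 'cuda:0',
        'n_repetitions': 3,
        'deterministic_mode': True,
    },
    'solver': {},                             # passed straight to Solver (cf. test_solver above)
}
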
Example #7
cont = False
max_iter = 2500
cheat_metric = False

print(
    'Starting TSNE analysis with: \n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}\n\t{}'
    .format(dataset_names, eval_spaces, subset_name, error_name, perplexities,
            cont, max_iter, cheat_metric))

########################################################################

for dataset_name in dataset_names:
    for eval_space in eval_spaces:
        for perplexity in perplexities:
            if eval_space == 'normalized':
                dataset = datasets.NormalizedPairedPoseDataset(
                    dataset_name, distorters.NoDistorter(), True)
            else:
                dataset = datasets.PairedPoseDataset(dataset_name,
                                                     distorters.NoDistorter(),
                                                     True)
            dataset.select_subset(subset_name)

            all_data = dataset[:]

            if error_name == 'distance':
                data = errors.distance_error(all_data.poses, all_data.labels)
            elif error_name == 'bone_length':
                data = errors.bone_length_error(all_data.poses,
                                                all_data.labels)
            elif error_name == 'dist_bone_cat':
                distance_errors = errors.distance_error(