# Example 1
    def test_returns_same_acc_mean_on_same_model_when_random_seed_is_the_same(
            self, method):
        """Evaluating the same model twice with the same fixed random seed
        must yield identical results."""
        eval_args = dict(
            dataset='CUB',
            backbone='Conv4',
            method=method,
            train_aug=True,
            n_iter=2,
            random_seed=1,
        )

        # Both artefacts live under the same step-output directory.
        output_dir = path_to_step_output('CUB', 'Conv4', method, 'tests_data')

        model_state = FetchModel(
            os.path.join(self.current_dir, output_dir, 'best_model.tar')
        ).apply()

        features, labels = load_features_and_labels_from_file(
            os.path.join(self.current_dir, output_dir, 'novel.hdf5'))

        first_run = MethodEvaluation(**eval_args).apply(
            model_state, (features, labels))
        second_run = MethodEvaluation(**eval_args).apply(
            model_state, (features, labels))

        assert first_run == second_run
# Example 2
    def test_process_features_returns_correct_class_list_when_features_and_labels_are_provided(features, labels):
        """_process_features must group the feature rows under their labels."""
        grouped = MethodEvaluation('CUB')._process_features(
            features_and_labels=(features, labels))

        assert grouped.keys() == {0, 1}
        assert len(grouped[0]) == 1
        assert len(grouped[1]) == 2

        # Each (label, index) pair maps to the expected feature row.
        expected_rows = {
            (0, 0): [0, 1, 2, 3],
            (1, 0): [4, 5, 6, 7],
            (1, 1): [8, 9, 10, 11],
        }
        for (label, index), row in expected_rows.items():
            np.testing.assert_array_equal(grouped[label][index],
                                          np.array(row))
# Example 3
    def test_process_features_does_not_delete_lines_with_zero_sum_if_not_all_zeros():
        """A row whose elements sum to zero, but which is not all-zero,
        must not be dropped by _process_features."""
        raw_features = np.array([
            [1, 2, 3, 4],
            [-1, 1, 0, 0],  # sums to zero yet carries information
        ])
        raw_labels = np.array([1, 1])

        grouped = MethodEvaluation('CUB')._process_features(
            features_and_labels=(raw_features, raw_labels))

        assert 1 in grouped
        assert len(grouped[1]) == 2
# Example 4
    def test_process_features_does_not_delete_lines_with_zeros_when_not_at_the_end():
        """An all-zero row that is not the last row must still be kept by
        _process_features."""
        raw_features = np.array([
            [0, 0, 0, 0],  # all-zero row placed first
            [4, 5, 6, 7],
        ])
        raw_labels = np.array([1, 1])

        grouped = MethodEvaluation('CUB')._process_features(
            features_and_labels=(raw_features, raw_labels))

        assert 1 in grouped
        assert len(grouped[1]) == 2
# Example 5
    def test_step_does_not_change_input_model(self, method):
        """Running the evaluation step must leave the input model state
        untouched (compared against a freshly re-fetched copy)."""
        step_args = dict(dataset='CUB',
                         backbone='Conv4',
                         method=method,
                         train_aug=True,
                         n_iter=2)

        output_dir = path_to_step_output('CUB', 'Conv4', method, 'tests_data')
        model_path = os.path.join(self.current_dir, output_dir,
                                  'best_model.tar')

        evaluated_model = FetchModel(model_path).apply()

        features, labels = load_features_and_labels_from_file(
            os.path.join(self.current_dir, output_dir, 'novel.hdf5'))

        MethodEvaluation(**step_args).apply(evaluated_model,
                                            (features, labels))

        # Re-fetch the model from disk as the pristine reference.
        pristine_model = FetchModel(model_path).apply()

        assert evaluated_model['epoch'] == pristine_model['epoch']
        assert (evaluated_model['state'].keys()
                == pristine_model['state'].keys())
        for layer in evaluated_model['state']:
            assert evaluated_model['state'][layer].equal(
                pristine_model['state'][layer])
# Example 6
    def test_step_does_not_return_error(self, method):
        """Smoke test: the evaluation step runs to completion without raising.

        Fix: the return value of ``apply()`` was previously bound to an
        unused local (``results``); the dead assignment is dropped — the
        call is kept solely for its execution.
        """
        args = dict(
            dataset='CUB',
            backbone='Conv4',
            method=method,
            train_aug=True,
            n_iter=2,
            n_swaps=2,
        )

        output_dir = path_to_step_output('CUB', 'Conv4', method, 'tests_data')

        # '0.tar' is an intermediate checkpoint, unlike the other tests in
        # this file which load 'best_model.tar'.
        model = FetchModel(
            os.path.join(self.current_dir, output_dir, '0.tar')).apply()

        features, labels = load_features_and_labels_from_file(
            os.path.join(self.current_dir, output_dir, 'novel.hdf5'))

        # The test passes iff this call does not raise.
        MethodEvaluation(**args).apply(model, (features, labels))
    def test_set_classification_task_output_shape(n_way, n_shot, n_query,
                                                  n_swaps, dim_features):
        """The sampled classification task must have shape
        (n_way, n_shot + n_query, dim_features)."""
        images_per_class = 20
        step = MethodEvaluation(dataset="dataset",
                                n_swaps=n_swaps,
                                n_query=n_query,
                                n_shot=n_shot,
                                test_n_way=n_way)

        # 50 fake classes, each with a random feature matrix.
        fake_features_per_label = {
            "label_{k}".format(k=k): np.random.rand(images_per_class,
                                                    dim_features)
            for k in range(50)
        }

        task = step._set_classification_task(fake_features_per_label)

        assert task.shape == (n_way, n_shot + n_query, dim_features)
# Example 8
    def test_step_does_not_return_error_for_maml(self, method):
        """apply() is expected to run without raising even when no
        precomputed features are supplied (features argument is None)."""
        step_args = dict(dataset='CUB',
                         backbone='Conv4',
                         method=method,
                         train_aug=True,
                         n_iter=2)

        fetched_model = FetchModel(
            os.path.join(
                self.current_dir,
                path_to_step_output('CUB', 'Conv4', method, 'tests_data'),
                'best_model.tar')
        ).apply()

        # No (features, labels) tuple is passed for this method.
        results = MethodEvaluation(**step_args).apply(fetched_model, None)