Example #1
    def test_gradcheck(self, device, dtype):
        torch.manual_seed(42)

        input = torch.rand((1, 1, 3, 3), device=device, dtype=dtype)
        output = torch.rand((1, 1, 3, 3), device=device, dtype=dtype)
        input_transform = torch.rand((1, 3, 3), device=device, dtype=dtype)
        other_transform = torch.rand((1, 3, 3), device=device, dtype=dtype)

        input = utils.tensor_to_gradcheck_var(input)  # to var
        input_transform = utils.tensor_to_gradcheck_var(
            input_transform)  # to var
        output = utils.tensor_to_gradcheck_var(output)  # to var
        other_transform = utils.tensor_to_gradcheck_var(
            other_transform)  # to var

        input_param = {
            'batch_prob': torch.tensor([True]),
            'params': {
                'x': input_transform
            },
            'flags': {}
        }

        augmentation = AugmentationBase2D(return_transform=True, p=1.0)

        with patch.object(augmentation, "apply_transform",
                          autospec=True) as apply_transform, patch.object(
                              augmentation,
                              "compute_transformation",
                              autospec=True) as compute_transformation:

            apply_transform.return_value = output
            compute_transformation.return_value = other_transform
            assert gradcheck(augmentation, (input, input_param),
                             raise_exception=True)
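The snippets in these examples are methods excerpted from a larger test class, so their imports are not shown. Below is a minimal sketch of the scaffolding they appear to rely on; the exact module paths (in particular for AugmentationBase2D, utils, and assert_close) differ between kornia releases and are assumptions here.

# Assumed imports for the test class these methods belong to (module paths are a best guess).
import torch
from unittest.mock import patch

from torch.autograd import gradcheck
from torch.testing import assert_close  # some kornia versions expose assert_close from kornia.testing instead

import kornia.testing as utils  # assumed source of tensor_to_gradcheck_var
from kornia.augmentation import AugmentationBase2D  # import path varies across kornia versions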
Example #2
    def test_forward(self, device, dtype):
        torch.manual_seed(42)
        input = torch.rand((2, 3, 4, 5), device=device, dtype=dtype)
        input_transform = torch.rand((2, 3, 3), device=device, dtype=dtype)
        expected_output = torch.rand((2, 3, 4, 5), device=device, dtype=dtype)
        expected_transform = torch.rand((2, 3, 3), device=device, dtype=dtype)
        augmentation = AugmentationBase2D(return_transform=False, p=1.0)

        with patch.object(
                augmentation, "apply_transform",
                autospec=True) as apply_transform, patch.object(
                    augmentation, "generate_parameters",
                    autospec=True) as generate_parameters, patch.object(
                        augmentation, "compute_transformation",
                        autospec=True) as compute_transformation:

            # Calling the augmentation with a single tensor shall return the expected tensor using the generated params.
            params = {'params': {}, 'flags': {'foo': 0}}
            generate_parameters.return_value = params
            apply_transform.return_value = expected_output
            compute_transformation.return_value = expected_transform
            output = augmentation(input)
            # RuntimeError: Boolean value of Tensor with more than one value is ambiguous
            # Not an easy fix: the mock compares the whole params dict with ==,
            # which trips on torch.tensor([True, True]).
            # _params = {'batch_prob': torch.tensor([True, True]), 'params': {}, 'flags': {'foo': 0}}
            # apply_transform.assert_called_once_with(input, _params)
            assert output is expected_output

            # Calling the augmentation with a tensor and return_transform set shall
            # return the expected tensor and the transformation.
            output, transformation = augmentation(input, return_transform=True)
            assert output is expected_output
            assert_close(transformation, expected_transform)

            # Calling the augmentation with a tensor and params shall return the expected tensor using the given params.
            params = {'params': {}, 'flags': {'bar': 1}}
            apply_transform.reset_mock()
            generate_parameters.return_value = None
            output = augmentation(input, params=params)
            # RuntimeError: Boolean value of Tensor with more than one value is ambiguous
            # Not an easy fix: the mock compares the whole params dict with ==,
            # which trips on torch.tensor([True, True]).
            # _params = {'batch_prob': torch.tensor([True, True]), 'params': {}, 'flags': {'bar': 1}}
            # apply_transform.assert_called_once_with(input, _params)
            assert output is expected_output

            # Calling the augmentation with a tensor, a transformation, and
            # return_transform set shall return the expected tensor and the proper
            # transformation matrix.
            expected_final_transformation = expected_transform @ input_transform
            output, transformation = augmentation((input, input_transform),
                                                  return_transform=True)
            assert output is expected_output
            assert torch.allclose(expected_final_transformation,
                                  transformation)
            assert transformation.shape[0] == input.shape[0]
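The commented-out assert_called_once_with checks above fail because mock compares the recorded call with ==, and a params dict containing torch.tensor([True, True]) has no unambiguous truth value. One possible workaround, sketched below for the first forward call and under the assumption that apply_transform is invoked positionally as apply_transform(input, params) (as the commented assertion suggests), is to unpack call_args and compare the tensor field with torch.equal:

# Sketch: inspect the recorded call manually instead of comparing the whole params dict,
# so bool() is never taken on a multi-element tensor.
args, _ = apply_transform.call_args
called_input, called_params = args[0], args[1]
assert called_input is input
assert torch.equal(called_params['batch_prob'], torch.tensor([True, True]))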
Example #3
    def test_check_batching(self, device, dtype, input_shape, in_trans_shape):
        input = torch.rand(input_shape, device=device, dtype=dtype)
        in_trans = torch.rand(in_trans_shape, device=device, dtype=dtype)
        augmentation = AugmentationBase2D(p=1.0, p_batch=1)
        augmentation.__check_batching__(input)
        augmentation.__check_batching__((input, in_trans))
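The extra arguments input_shape and in_trans_shape indicate that this test is parametrized by a decorator not included in the excerpt. A minimal sketch of such a parametrization, using hypothetical shapes and requiring import pytest, could look like this:

# Hypothetical parametrization; the shapes used by the original test suite are not shown here.
@pytest.mark.parametrize("input_shape,in_trans_shape", [
    ((1, 3, 4, 5), (1, 3, 3)),  # single-sample batch (hypothetical)
    ((2, 3, 4, 5), (2, 3, 3)),  # two-sample batch (hypothetical)
])
def test_check_batching(self, device, dtype, input_shape, in_trans_shape):
    ...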