Example #1
    def test_autograd_registration(self):
        """Tests registration of new autograd function."""

        # check that get_grad_fn() returns correct functions:
        for func_name, reference_func in gradients.FUNCTION_REGISTRY.items():
            grad_fn = gradients.get_grad_fn(func_name)
            self.assertEqual(grad_fn, reference_func)
            self.assertEqual(grad_fn.name, func_name)

        # check that non-existing functions return None:
        for invalid_func_name in ["bfobofb", "djhfhr"]:
            func = gradients.get_grad_fn(invalid_func_name)
            self.assertIsNone(func)

        # check that registering new classes works:
        for func_name in ["mock_func1", "mock_func2", "mock_func3"]:
            cls = type("%sName" % func_name, (AutogradFunction,), {})
            gradients.register_function(func_name)(cls)
            grad_fn = gradients.get_grad_fn(func_name)
            self.assertEqual(grad_fn, cls)
            self.assertEqual(grad_fn.name, func_name)

        # check that existing functions cannot be overwritten:
        for func_name in ["add", "sub", "view"]:
            cls = type("%sName" % func_name, (AutogradFunction,), {})
            with self.assertRaises(ValueError):
                gradients.register_function(func_name)(cls)
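
For reference, here is a minimal sketch of the same registration flow outside the test harness, using only the calls exercised above (register_function, get_grad_fn, and the AutogradFunction base class, assumed importable from crypten.gradients as in the tests). The name "my_identity" and the MyIdentity class are hypothetical and not part of CrypTen.

    from crypten import gradients
    from crypten.gradients import AutogradFunction

    class MyIdentity(AutogradFunction):
        """Hypothetical function: passes values and gradients through unchanged."""

        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output

    # register under a new name, then look it up again
    gradients.register_function("my_identity")(MyIdentity)
    assert gradients.get_grad_fn("my_identity") == MyIdentity
    assert gradients.get_grad_fn("my_identity").name == "my_identity"

    # registering a name that already exists (e.g. "add") raises ValueError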
Example #2
    def test_autograd_func_take(self):
        """Tests the part of autograd take that does not have a torch equivalent"""
        tensor_size = [5, 5, 5, 5]
        index = torch.tensor([[[1, 2], [3, 4]], [[4, 2], [1, 3]]], dtype=torch.long)

        # Test when dimension!=None
        for dimension in range(0, 4):
            tensor = get_random_test_tensor(size=tensor_size, is_float=True)
            ref_forward = torch.from_numpy(tensor.numpy().take(index, dimension))
            encrypted_tensor = crypten.cryptensor(tensor)
            encr_inputs = [encrypted_tensor, index, dimension]

            # test forward
            ctx = AutogradContext()
            grad_fn_take = gradients.get_grad_fn("take")
            encr_output = grad_fn_take.forward(ctx, encr_inputs)
            self._check(encr_output, ref_forward, "take forward failed: dimension set")

            # test backward:
            # first, recreate take forward function with only torch operations
            tensor2 = get_random_test_tensor(size=tensor_size, is_float=True)
            tensor2.requires_grad = True
            all_indices = [slice(0, x) for x in tensor2.size()]
            all_indices[dimension] = index
            ref_forward_torch = tensor2[all_indices]
            grad_output = torch.ones(ref_forward_torch.size())
            ref_forward_torch.backward(grad_output)

            # next, do backward pass on encrypted tensor
            encr_grad_output = encr_output.new(grad_output)
            encr_grad = grad_fn_take.backward(ctx, encr_grad_output)

            # finally, compare values
            self._check(encr_grad, tensor2.grad, "take backward failed: dimension set")
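
As background for why the test builds its torch reference by hand: torch.take indexes into a flattened tensor and has no dimension argument, whereas the grad function under test also supports numpy-style take along a dimension. A tiny NumPy-only illustration of that shape behavior (not part of the test):

    import numpy as np

    x = np.arange(24).reshape(2, 3, 4)
    index = np.array([[0, 2], [1, 1]])

    # axis 1 is replaced by the shape of `index`, so the result is (2, 2, 2, 4)
    y = x.take(index, axis=1)
    assert y.shape == (2, 2, 2, 4)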
Example #3
    def test_batchnorm(self):
        """
        Tests batchnorm forward and backward steps with training on / off.
        """
        # sizes for 1D, 2D, and 3D batchnorm
        # batch_size (dim=0) > 500 and increase tolerance to avoid flaky precision
        # errors in inv_var, which involves sqrt and reciprocal
        sizes = [(800, 5), (500, 8, 15), (600, 10, 3, 15)]
        tolerance = 0.5

        for size in sizes:
            for is_training in (False, True):
                tensor = get_random_test_tensor(size=size, is_float=True)
                tensor.requires_grad = True
                encrypted_input = crypten.cryptensor(tensor)

                C = size[1]
                weight = get_random_test_tensor(size=[C],
                                                max_value=1,
                                                is_float=True)
                bias = get_random_test_tensor(size=[C],
                                              max_value=1,
                                              is_float=True)
                weight.requires_grad = True
                bias.requires_grad = True

                # dimensions for mean and variance
                stats_dimensions = list(range(tensor.dim()))
                # perform on C dimension for tensor of shape (N, C, +)
                stats_dimensions.pop(1)

                running_mean = tensor.mean(stats_dimensions).detach()
                running_var = tensor.var(stats_dimensions).detach()
                enc_running_mean = encrypted_input.mean(stats_dimensions)
                enc_running_var = encrypted_input.var(stats_dimensions)

                reference = torch.nn.functional.batch_norm(tensor,
                                                           running_mean,
                                                           running_var,
                                                           weight=weight,
                                                           bias=bias)

                encrypted_input = AutogradCrypTensor(encrypted_input)
                ctx = AutogradContext()
                batch_norm_fn = gradients.get_grad_fn("batchnorm")
                encrypted_out = batch_norm_fn.forward(
                    ctx,
                    (encrypted_input, weight, bias),
                    training=is_training,
                    running_mean=enc_running_mean,
                    running_var=enc_running_var,
                )

                # check forward
                self._check(
                    encrypted_out,
                    reference,
                    "batchnorm forward failed with trainning "
                    f"{is_trainning} on {tensor.dim()}-D",
                    tolerance=tolerance,
                )

                # check backward (input, weight, and bias gradients)
                reference.backward(reference)
                encrypted_grad = batch_norm_fn.backward(ctx, encrypted_out)
                TorchGrad = namedtuple("TorchGrad", ["name", "value"])
                torch_gradients = [
                    TorchGrad("input gradient", tensor.grad),
                    TorchGrad("weight gradient", weight.grad),
                    TorchGrad("bias gradient", bias.grad),
                ]

                for i, torch_gradient in enumerate(torch_gradients):
                    self._check(
                        encrypted_grad[i],
                        torch_gradient.value,
                        f"batchnorm backward {torch_gradient.name} failed"
                        f"with training {is_trainning} on {tensor.dim()}-D",
                        tolerance=tolerance,
                    )
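
For context, a plain-PyTorch sketch of the normalization that torch.nn.functional.batch_norm applies with the supplied running statistics. This is only the reference formula the test compares against, not the CrypTen implementation; eps=1e-5 matches the torch default.

    import torch

    def batchnorm_reference(x, weight, bias, mean, var, eps=1e-5):
        # broadcast the per-channel statistics over an (N, C, ...) shaped input
        shape = [1, -1] + [1] * (x.dim() - 2)
        x_norm = (x - mean.view(shape)) / torch.sqrt(var.view(shape) + eps)
        return weight.view(shape) * x_norm + bias.view(shape)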
Example #4
    def test_autograd_functions(self):
        """Tests individual autograd functions without testing autograd."""

        # input sizes for tests of autograd functions:
        input_size = {
            "t": (2, 4),
            "transpose": (4, 8, 3),
            "flip": (2, 3, 7, 2),
            "view": (8, 6),
            "reshape": (8, 6),
            "flatten": (8, 6),
            "narrow": (10, 7),
            "take": (5, 10, 15),  # NOTE: this only tests the pytorch take
            # functionality. The remaining take functionality
            # is tested separately
            "gather": (2, 2),
            "scatter": (3, 5),
            "roll": (4, 8),
            "squeeze": (12, 1, 6),
            "unsqueeze": (7, 3),
            "__getitem__": (6, 6),
            "neg": (8, 4),
            "relu": (3, 7),
            "tanh": (4, 3),
            "add": (10, 7),
            "sub": (9, 2),
            "mul": (3, 5),
            "matmul": (7, 7),
            "div": (5, 4),
            "pow": (4, 3),
            "square": (8, 5),
            "sqrt": (5, 6),
            "exp": (5, 2),
            "log": (3, 7),
            "dot": (8, ),
            "ger": (12, ),
            "sin": (5, 4),
            "cos": (9, 3),
            "abs": (8, 5),
            "sign": (8, 5),
            "norm":
            (3,
             2),  # NOTE: Flaky because sqrt only works for values up to 200.
            "sum": (4, 3),
            "cumsum": (13, 7),
            "trace": (4, 4),
            "mean": (2, 9),
            "var": (3, 4),
            "max": (6, 7),
            "min": (4, 5),
            "sigmoid": (4, 7),
            "softmax": (10, 5),
            "pad": (6, 3),
            # "avg_pool2d": (1, 3, 21, 21),     # TODO: Enable once avg_pool2d is
            #                                     fixed in gradients.py.
            "max_pool2d": (1, 3, 21, 21),
            "conv2d": (1, 4, 21, 21),
            "binary_cross_entropy": (8, ),
            "cross_entropy": (8, 4),
        }
        additional_args = {
            "transpose": [2, 0],
            "flip": [(1, 3, 2)],
            "view": [(4, 12)],
            "reshape": [(4, 12)],
            "narrow": [1, 2, 3],
            "gather": [1, torch.tensor([[0, 0], [1, 0]])],
            "scatter": [
                0,
                torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]),
                get_random_test_tensor(size=(2, 5), is_float=True),
            ],
            "roll": [(2, -1), (0, 1)],
            "squeeze": [1],
            "unsqueeze": [1],
            "__getitem__": [1],
            "div": [4.0],
            "pow": [2.0],
            "cumsum": [1],
            "softmax": [1],
            "pad": [(1, 2, 3, 4)],
            "avg_pool2d": [5],
            "max_pool2d": [3],
            "conv2d":
            [get_random_test_tensor(size=(2, 4, 3, 3), is_float=True)],
            "take": [torch.tensor([0, 5, 10])],
            "binary_cross_entropy": [
                get_random_test_tensor(size=(8, ),
                                       is_float=True).gt(0.0).float()
            ],
            "cross_entropy": [
                onehot(get_random_test_tensor(size=(8, ), max_value=3).abs(),
                       num_targets=4)
            ],
        }
        binary_functions = ["add", "sub", "mul", "dot", "ger", "matmul"]
        positive_only = ["pow", "sqrt", "log", "binary_cross_entropy"]

        # loop over all autograd functions:
        for func_name in input_size.keys():

            # generate inputs:
            inputs = [
                get_random_test_tensor(size=input_size[func_name],
                                       max_value=1.0,
                                       is_float=True)
                for _ in range(2 if func_name in binary_functions else 1)
            ]
            if func_name in positive_only:  # some functions do not take negative values
                inputs = [input.abs().add_(0.001) for input in inputs]
            for input in inputs:
                input.requires_grad = True
            encr_inputs = [crypten.cryptensor(input) for input in inputs]
            number_of_inputs = len(inputs)

            # add additional arguments, encrypting only tensors (if found):
            if func_name in additional_args:
                inputs += additional_args[func_name]
                encr_inputs += additional_args[func_name]
                if func_name == "take":
                    encr_inputs += [None]
                elif func_name not in ["gather", "scatter"]:
                    encr_inputs = [
                        crypten.cryptensor(t) if torch.is_tensor(t) else t
                        for t in encr_inputs
                    ]

            # cross_entropy uses one-hot targets in crypten but not in PyTorch:
            if func_name == "cross_entropy":
                inputs[1] = inputs[1].argmax(1)

            # AutogradFunction.forward() does not accept unpacked inputs:
            if len(encr_inputs) == 1:
                encr_inputs = encr_inputs[0]

            # test forward function:
            if hasattr(inputs[0], func_name):  # torch.function()
                reference = getattr(inputs[0], func_name)(*inputs[1:])
            elif hasattr(F, func_name):  # torch.nn.functional.function()
                reference = getattr(F, func_name)(*inputs)
            elif func_name == "square":
                reference = inputs[0].pow(2.0)
            else:
                raise ValueError("unknown PyTorch function: %s" % func_name)
            ctx = AutogradContext()
            grad_fn = gradients.get_grad_fn(func_name)
            encr_output = grad_fn.forward(ctx, encr_inputs)
            self._check(encr_output, reference,
                        "%s forward failed" % func_name)
            if func_name == "view":
                ctx = AutogradContext()
                # check view() with a list of int to represent size.
                # encr_inputs[0]: input
                # encr_inputs[1]: tuple as torch.Size, to be unpacked.
                view_input, sizes = encr_inputs
                encr_output = grad_fn.forward(ctx, [view_input] +
                                              [size for size in sizes])
                self._check(encr_output, reference,
                            "%s forward failed" % func_name)

            # run backward functions:
            grad_output = get_random_test_tensor(max_value=2,
                                                 size=reference.size(),
                                                 is_float=True)
            encr_grad_output = encr_output.new(grad_output)
            reference.backward(grad_output)
            encr_grad = grad_fn.backward(ctx, encr_grad_output)

            # test result of running backward function:
            if not isinstance(encr_grad, (list, tuple)):
                encr_grad = (encr_grad, )
            for idx in range(number_of_inputs):
                self._check(encr_grad[idx], inputs[idx].grad,
                            "%s backward failed" % func_name)