Example 1
    def test_autograd_func_take(self):
        """Tests the part of autograd take that does not have a torch equivalent"""
        tensor_size = [5, 5, 5, 5]
        index = torch.tensor([[[1, 2], [3, 4]], [[4, 2], [1, 3]]], dtype=torch.long)

        # Test when dimension != None
        for dimension in range(0, 4):
            tensor = get_random_test_tensor(size=tensor_size, is_float=True)
            ref_forward = torch.from_numpy(tensor.numpy().take(index, dimension))
            encrypted_tensor = crypten.cryptensor(tensor)
            encr_inputs = [encrypted_tensor, index, dimension]

            # test forward
            ctx = AutogradContext()
            grad_fn_take, _ = gradients.get_grad_fn("take")
            encr_output = grad_fn_take.forward(ctx, *encr_inputs)
            self._check(encr_output, ref_forward, "take forward failed: dimension set")

            # test backward:
            # first, recreate take forward function with only torch operations
            tensor2 = get_random_test_tensor(size=tensor_size, is_float=True)
            tensor2.requires_grad = True
            all_indices = [slice(0, x) for x in tensor2.size()]
            all_indices[dimension] = index
            ref_forward_torch = tensor2[all_indices]
            grad_output = torch.ones(ref_forward_torch.size())
            ref_forward_torch.backward(grad_output)

            # next, do backward pass on encrypted tensor
            encr_grad_output = encr_output.new(grad_output)
            encr_grad = grad_fn_take.backward(ctx, encr_grad_output)

            # finally, compare values
            self._check(encr_grad, tensor2.grad, "take backward failed: dimension set")
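
The test above rebuilds "take along a dimension" by hand because torch.Tensor.take() always flattens its input, whereas numpy.take() accepts an axis argument. A minimal sketch of that equivalence in plain NumPy/PyTorch (no CrypTen; the small tensors below are made up for illustration):

    import torch

    x = torch.arange(24, dtype=torch.float32).reshape(2, 3, 4)
    index = torch.tensor([[0, 2], [1, 3]], dtype=torch.long)
    dimension = 2

    # numpy-style take along an axis, as used for the reference forward pass:
    np_result = torch.from_numpy(x.numpy().take(index, dimension))

    # equivalent torch advanced indexing, as rebuilt for the backward check:
    all_indices = [slice(0, s) for s in x.size()]
    all_indices[dimension] = index
    torch_result = x[tuple(all_indices)]

    assert torch.equal(np_result, torch_result)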
Example 2
    def test_non_differentiable_marking(self):
        """Tests whether marking of non-differentiability works correctly."""

        # generate random inputs:
        inputs = [get_random_test_tensor(is_float=True) for _ in range(5)]
        inputs = [crypten.cryptensor(input) for input in inputs]
        ctx = AutogradContext()

        # repeat test multiple times:
        for _ in range(10):

            # mark non-differentiable inputs as such:
            differentiable = [
                random.random() > 0.5 for _ in range(len(inputs))
            ]
            for idx, diff in enumerate(differentiable):
                if not diff:
                    ctx.mark_non_differentiable(inputs[idx])

            # check that inputs were correctly marked:
            for idx, input in enumerate(inputs):
                self.assertEqual(
                    ctx.is_differentiable(input),
                    differentiable[idx],
                    "marking of differentiability failed",
                )
            ctx.reset()

        # test behavior of autograd in CrypTensor:
        input = inputs[0]
        input.requires_grad = True
        reference = [True, True, False]
        for func_name in ["min", "max"]:
            outputs = [None] * 3
            outputs[0] = getattr(input, func_name)()
            outputs[1], outputs[2] = getattr(input, func_name)(0)
            for idx, output in enumerate(outputs):
                self.assertEqual(
                    output.requires_grad,
                    reference[idx],
                    "value of requires_grad is incorrect",
                )

        # behavior of max_pool2d in which indices are returned:
        input = get_random_test_tensor(size=(1, 3, 8, 8), is_float=True)
        input = crypten.cryptensor(input, requires_grad=True)
        reference = [True, True, False]
        outputs = [None] * 3
        outputs[0] = input.max_pool2d(2, return_indices=False)
        outputs[1], outputs[2] = input.max_pool2d(2, return_indices=True)
        for idx, output in enumerate(outputs):
            self.assertEqual(
                output.requires_grad,
                reference[idx],
                "value of requires_grad is incorrect",
            )
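
For comparison, a minimal plain-PyTorch sketch (no CrypTen) of the behaviour that the reference list [True, True, False] encodes: a reduction without a dimension returns one differentiable value, while a reduction along a dimension also returns integer indices that never require grad.

    import torch

    x = torch.randn(4, 4, requires_grad=True)

    value = x.max()                   # single differentiable output
    assert value.requires_grad

    values, indices = x.max(dim=0)    # values differentiable, indices not
    assert values.requires_grad
    assert not indices.requires_grad  # indices are a LongTensor without grad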
Example 3
    def test_batchnorm(self):
        """
        Tests batchnorm forward and backward steps with training on / off.
        """
        tolerance = 0.1
        sizes = [(8, 5), (16, 3), (32, 5), (8, 6, 4), (8, 4, 3, 5)]
        for size in sizes:
            for is_training in (False, True):

                # sample input data, weight, and bias:
                tensor = get_random_test_tensor(size=size, is_float=True)
                encrypted_input = crypten.cryptensor(tensor)
                C = size[1]
                weight = get_random_test_tensor(size=[C],
                                                max_value=1,
                                                is_float=True)
                bias = get_random_test_tensor(size=[C],
                                              max_value=1,
                                              is_float=True)
                weight.requires_grad = True
                bias.requires_grad = True

                # dimensions over which means and variances are computed:
                stats_dimensions = list(range(tensor.dim()))
                stats_dimensions.pop(1)

                # dummy running mean and variance:
                running_mean = tensor.mean(stats_dimensions).detach()
                running_var = tensor.var(stats_dimensions).detach()
                enc_running_mean = crypten.cryptensor(running_mean)
                enc_running_var = crypten.cryptensor(running_var)

                # compute reference output:
                tensor.requires_grad = True
                reference = torch.nn.functional.batch_norm(
                    tensor,
                    running_mean,
                    running_var,
                    weight=weight,
                    bias=bias,
                    training=is_training,
                )

                # compute CrypTen output:
                encrypted_input.requires_grad = True
                ctx = AutogradContext()
                batch_norm_fn, _ = crypten.gradients.get_grad_fn("batchnorm")
                with crypten.no_grad():
                    encrypted_out = batch_norm_fn.forward(
                        ctx,
                        encrypted_input,
                        weight,
                        bias,
                        training=is_training,
                        running_mean=enc_running_mean,
                        running_var=enc_running_var,
                    )

                # check forward
                self._check(
                    encrypted_out,
                    reference,
                    "batchnorm forward failed with training "
                    f"{is_training} on {tensor.dim()}-D",
                    tolerance=tolerance,
                )

                # check backward (input, weight, and bias gradients):
                reference.backward(reference)
                with crypten.no_grad():
                    encrypted_grad = batch_norm_fn.backward(ctx, encrypted_out)
                TorchGrad = namedtuple("TorchGrad", ["name", "value"])
                torch_gradients = [
                    TorchGrad("input gradient", tensor.grad),
                    TorchGrad("weight gradient", weight.grad),
                    TorchGrad("bias gradient", bias.grad),
                ]
                for i, torch_gradient in enumerate(torch_gradients):
                    self._check(
                        encrypted_grad[i],
                        torch_gradient.value,
                        f"batchnorm backward {torch_gradient.name} failed "
                        f"with training {is_training} on {tensor.dim()}-D",
                        tolerance=tolerance,
                    )
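
For reference, a minimal plain-PyTorch sketch (no CrypTen) of what torch.nn.functional.batch_norm computes in training mode: per-channel batch mean and biased variance, followed by the affine transform with weight and bias. The eps value below is torch's default and is assumed here.

    import torch

    x = torch.randn(8, 5)
    weight = torch.randn(5)
    bias = torch.randn(5)
    eps = 1e-5

    mean = x.mean(dim=0)
    var = x.var(dim=0, unbiased=False)  # training mode normalizes with the biased variance
    manual = (x - mean) / torch.sqrt(var + eps) * weight + bias

    reference = torch.nn.functional.batch_norm(
        x, None, None, weight=weight, bias=bias, training=True, eps=eps
    )
    assert torch.allclose(manual, reference, atol=1e-6)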
Example 4
    def test_batchnorm(self):
        """
        Tests batchnorm forward and backward steps with training on / off.
        """
        # sizes for 1D, 2D, and 3D batchnorm
        # batch_size (dim=0) > 500 and increase tolerance to avoid flaky precision
        # errors in inv_var, which involves sqrt and reciprocal
        sizes = [(800, 5), (500, 8, 15), (600, 10, 3, 15)]
        tolerance = 0.5

        for size in sizes:
            for is_training in (False, True):
                tensor = get_random_test_tensor(size=size, is_float=True)
                tensor.requires_grad = True
                encrypted_input = crypten.cryptensor(tensor)

                C = size[1]
                weight = get_random_test_tensor(size=[C], max_value=1, is_float=True)
                bias = get_random_test_tensor(size=[C], max_value=1, is_float=True)
                weight.requires_grad = True
                bias.requires_grad = True

                # dimensions for mean and variance
                stats_dimensions = list(range(tensor.dim()))
                # perform on C dimension for tensor of shape (N, C, +)
                stats_dimensions.pop(1)

                running_mean = tensor.mean(stats_dimensions).detach()
                running_var = tensor.var(stats_dimensions).detach()
                enc_running_mean = encrypted_input.mean(stats_dimensions)
                enc_running_var = encrypted_input.var(stats_dimensions)

                reference = torch.nn.functional.batch_norm(
                    tensor,
                    running_mean,
                    running_var,
                    weight=weight,
                    bias=bias,
                    training=is_training,
                )

                encrypted_input.requires_grad = True
                ctx = AutogradContext()
                batch_norm_fn = crypten.gradients.get_grad_fn("batchnorm")
                encrypted_out = batch_norm_fn.forward(
                    ctx,
                    encrypted_input,
                    weight,
                    bias,
                    training=is_training,
                    running_mean=enc_running_mean,
                    running_var=enc_running_var,
                )

                # check forward
                self._check(
                    encrypted_out,
                    reference,
                    "batchnorm forward failed with trainning "
                    f"{is_trainning} on {tensor.dim()}-D",
                    tolerance=tolerance,
                )

                # check backward (input, weight, and bias gradients)
                reference.backward(reference)
                encrypted_grad = batch_norm_fn.backward(ctx, encrypted_out)
                TorchGrad = namedtuple("TorchGrad", ["name", "value"])
                torch_gradients = [
                    TorchGrad("input gradient", tensor.grad),
                    TorchGrad("weight gradient", weight.grad),
                    TorchGrad("bias gradient", bias.grad),
                ]

                for i, torch_gradient in enumerate(torch_gradients):
                    self._check(
                        encrypted_grad[i],
                        torch_gradient.value,
                        f"batchnorm backward {torch_gradient.name} failed"
                        f"with training {is_trainning} on {tensor.dim()}-D",
                        tolerance=tolerance,
                    )
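
A minimal plain-PyTorch sketch (not CrypTen) of the in-place update that training mode applies to the running_mean and running_var buffers passed to batch_norm above; momentum 0.1 is torch's default and is assumed here.

    import torch

    x = torch.randn(800, 5)
    running_mean = torch.zeros(5)
    running_var = torch.ones(5)
    momentum = 0.1

    torch.nn.functional.batch_norm(
        x, running_mean, running_var, training=True, momentum=momentum
    )

    # exponential moving average; running_var uses the unbiased batch variance
    expected_mean = (1 - momentum) * torch.zeros(5) + momentum * x.mean(dim=0)
    expected_var = (1 - momentum) * torch.ones(5) + momentum * x.var(dim=0, unbiased=True)
    assert torch.allclose(running_mean, expected_mean, atol=1e-5)
    assert torch.allclose(running_var, expected_var, atol=1e-5)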