Example No. 1
    def test_cross_entropy(self):
        """Tests cross_entropy and binary_cross_entropy"""
        sizes = [(3, 2), (8, 4), (5, 10)]
        losses = ["binary_cross_entropy", "cross_entropy"]

        for size, loss in itertools.product(sizes, losses):
            batch_size, num_targets = size
            if loss == "binary_cross_entropy":
                tensor = get_random_test_tensor(
                    size=(batch_size,), max_value=1.0, is_float=True
                )
                tensor = tensor.abs().add_(0.001)

                target = get_random_test_tensor(size=(batch_size,), is_float=True)
                target = target.gt(0.0).float()
                target_encr = crypten.cryptensor(target)
            else:
                tensor = get_random_test_tensor(size=size, is_float=True)
                target = get_random_test_tensor(
                    size=(batch_size,), max_value=num_targets - 1
                )
                target = onehot(target.abs(), num_targets=num_targets)
                target_encr = crypten.cryptensor(target)
                # CrypTen, unlike PyTorch, uses one-hot targets
                target = target.argmax(1)

            # forward
            tensor.requires_grad = True
            tensor_encr = AutogradCrypTensor(
                crypten.cryptensor(tensor), requires_grad=True
            )
            reference = getattr(torch.nn.functional, loss)(tensor, target)
            out_encr = getattr(tensor_encr, loss)(target_encr)
            self._check(out_encr, reference, f"{loss} forward failed")

            # backward
            grad_out = get_random_test_tensor(size=reference.shape, is_float=True)
            grad_out_encr = crypten.cryptensor(grad_out)
            reference.backward(grad_out)
            out_encr.backward(grad_out_encr)
            self._check(tensor_encr.grad, tensor.grad, f"{loss} backward failed")
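
The comment above notes that CrypTen, unlike PyTorch, expects one-hot targets for cross_entropy. As a minimal illustration (plain PyTorch only, values chosen for the example; not part of the test suite), class indices can be converted to such targets and then encrypted:

    # Illustrative sketch, not from the test suite.
    import torch

    labels = torch.tensor([2, 0, 1])  # integer class indices
    one_hot_targets = torch.nn.functional.one_hot(labels, num_classes=4).float()
    # one_hot_targets can then be encrypted with crypten.cryptensor(one_hot_targets)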
Example No. 2
    def test_cat_stack(self):
        for func in ["cat", "stack"]:
            for dimensions in range(1, 5):
                size = [5] * dimensions
                for num_tensors in range(1, 5):
                    for dim in range(dimensions):
                        tensors = [
                            get_random_test_tensor(size=size, is_float=True)
                            for _ in range(num_tensors)
                        ]
                        encrypted_tensors = [
                            AutogradCrypTensor(crypten.cryptensor(t))
                            for t in tensors
                        ]
                        for i in range(len(tensors)):
                            tensors[i].grad = None
                            tensors[i].requires_grad = True
                            encrypted_tensors[i].grad = None
                            encrypted_tensors[i].requires_grad = True

                        # Forward
                        reference = getattr(torch, func)(tensors, dim=dim)
                        encrypted_out = getattr(crypten,
                                                func)(encrypted_tensors,
                                                      dim=dim)
                        self._check(encrypted_out, reference,
                                    f"{func} forward failed")

                        # Backward
                        grad_output = get_random_test_tensor(
                            size=reference.size(), is_float=True)
                        encrypted_grad_output = crypten.cryptensor(grad_output)

                        reference.backward(grad_output)
                        encrypted_out.backward(encrypted_grad_output)
                        for i in range(len(tensors)):
                            self._check(
                                encrypted_tensors[i].grad,
                                tensors[i].grad,
                                f"{func} backward failed",
                            )
Example No. 3
    def test_sum(self):
        """Tests sum reduction on encrypted tensor."""
        tensor = get_random_test_tensor(size=(5, 100, 100), is_float=True)
        encrypted = ArithmeticSharedTensor(tensor)
        self._check(encrypted.sum(), tensor.sum(), "sum failed")

        for dim in [0, 1, 2]:
            reference = tensor.sum(dim)
            with self.benchmark(type="sum", dim=dim) as bench:
                for _ in bench.iters:
                    encrypted_out = encrypted.sum(dim)
            self._check(encrypted_out, reference, "sum failed")
Example No. 4
 def test_gather_random(self):
     sizes = [(), (1, ), (5, ), (5, 5), (5, 5, 5), (1000, )]
     for rank in range(self.world_size):
         for size in sizes:
             tensor = get_random_test_tensor(size=size)
             result = comm.get().gather(tensor, rank)
             if rank == self.rank:
                 self.assertTrue(isinstance(result, list))
                 for res in result:
                     self.assertTrue((res == tensor).all())
             else:
                 self.assertIsNone(result[0])
Example No. 5
    def test_training(self):
        """
        Tests training of simple model in crypten.nn.
        """

        # create MLP with one hidden layer:
        learning_rate = 0.1
        batch_size, num_inputs, num_intermediate, num_outputs = 8, 10, 5, 1
        model = crypten.nn.Sequential(
            crypten.nn.Linear(num_inputs, num_intermediate),
            crypten.nn.ReLU(),
            crypten.nn.Linear(num_intermediate, num_outputs),
        )
        model.train()
        model.encrypt()
        loss = crypten.nn.MSELoss()

        # perform training iterations:
        for _ in range(10):
            for compute_gradients in [True, False]:

                # get training sample:
                input = get_random_test_tensor(
                    size=(batch_size, num_inputs), is_float=True
                )
                target = input.mean(dim=1, keepdim=True)

                # encrypt training sample:
                input = crypten.cryptensor(input)
                target = crypten.cryptensor(target)
                if compute_gradients:
                    input.requires_grad = True
                    target.requires_grad = True

                # perform forward pass:
                output = model(input)
                loss_value = loss(output, target)

                # set gradients to "zero" (setting to None is more efficient):
                model.zero_grad()
                for param in model.parameters():
                    self.assertIsNone(param.grad, "zero_grad did not reset gradients")

                # perform backward pass:
                loss_value.backward()

                # perform parameter update:
                reference = {}
                reference = self._compute_reference_parameters(
                    "", reference, model, learning_rate
                )
                model.update_parameters(learning_rate)
                self._check_reference_parameters("", reference, model)
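
For reference, the same training pattern can be sketched outside the test harness. This is a minimal single-process sketch under the assumption that crypten.init() is the only setup needed; shapes, data, and the 0.1 learning rate are illustrative:

    # Illustrative sketch, not from the test suite.
    import crypten
    import torch

    crypten.init()

    model = crypten.nn.Sequential(
        crypten.nn.Linear(10, 5),
        crypten.nn.ReLU(),
        crypten.nn.Linear(5, 1),
    )
    model.train()
    model.encrypt()
    criterion = crypten.nn.MSELoss()

    # learn the row-wise mean, as in the test above
    x_plain = torch.randn(8, 10)
    y_plain = x_plain.mean(dim=1, keepdim=True)

    x = crypten.cryptensor(x_plain, requires_grad=True)
    y = crypten.cryptensor(y_plain)

    loss_value = criterion(model(x), y)
    model.zero_grad()
    loss_value.backward()
    model.update_parameters(0.1)  # plain SGD step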
Example No. 6
    def test_torch_comparators(self):
        """Test torch comparators on CUDALongTensor"""
        for comp in ["gt", "ge", "lt", "le", "eq", "ne"]:
            tensor = get_random_test_tensor(is_float=False)
            tensor2 = get_random_test_tensor(is_float=False)

            t_cuda = CUDALongTensor(tensor)
            t2_cuda = CUDALongTensor(tensor2)

            reference = getattr(torch, comp)(tensor, tensor2).long()
            result1 = getattr(t_cuda, comp)(t2_cuda)
            result2 = getattr(torch, comp)(t_cuda, t2_cuda)

            self.assertTrue(
                type(result1) == CUDALongTensor, "result should be a CUDALongTensor"
            )
            self.assertTrue(
                type(result2) == CUDALongTensor, "result should be a CUDALongTensor"
            )
            self._check_int(result1.cpu(), reference, "%s comparator failed" % comp)
            self._check_int(result2.cpu(), reference, "%s comparator failed" % comp)
Example No. 7
    def test_torch_broadcast_tensor(self):
        """Test torch.broadcast_tensor on CUDALongTensor"""
        x = get_random_test_tensor(size=(1, 5), is_float=False)
        y = get_random_test_tensor(size=(5, 1), is_float=False)

        x_cuda = CUDALongTensor(x)
        y_cuda = CUDALongTensor(y)

        a, b = torch.broadcast_tensors(x, y)
        a_cuda, b_cuda = torch.broadcast_tensors(x_cuda, y_cuda)

        self.assertTrue(
            type(a_cuda) == CUDALongTensor,
            "result should be a CUDALongTensor")
        self.assertTrue(
            type(b_cuda) == CUDALongTensor,
            "result should be a CUDALongTensor")
        self._check_int(a, a_cuda.cpu(),
                        "torch.broadcast_tensor failed for CUDALongTensor")
        self._check_int(b, b_cuda.cpu(),
                        "torch.broadcast_tensor failed for CUDALongTensor")
Example No. 8
    def test_sum(self):
        """Tests sum using binary shares"""
        tensor = get_random_test_tensor(size=(5, 5, 5), is_float=False)
        encrypted = BinarySharedTensor(tensor)
        self._check(encrypted.sum(), tensor.sum(), "sum failed")

        for dim in [0, 1, 2]:
            reference = tensor.sum(dim)
            with self.benchmark(type="sum", dim=dim) as bench:
                for _ in bench.iters:
                    encrypted_out = encrypted.sum(dim)
            self._check(encrypted_out, reference, "sum failed")
Example No. 9
    def test_squeeze_unsqueeze(self):
        """Test addition and removal of tensor dimensions"""
        for size in SIZES:
            tensor = get_random_test_tensor(size=size, is_float=True)

            self._check_forward_backward("squeeze", tensor)
            for dim in range(tensor.dim()):
                self._check_forward_backward("squeeze", tensor, dim)
                self._check_forward_backward("unsqueeze", tensor, dim)

            # Check unsqueeze on last dimension
            self._check_forward_backward("unsqueeze", tensor, tensor.dim())
Example No. 10
    def test_where(self):
        """Test that crypten.where properly conditions"""
        sizes = [(10,), (5, 10), (1, 5, 10)]
        y_types = [lambda x: x, crypten.cryptensor]

        for size, y_type in itertools.product(sizes, y_types):
            tensor1 = get_random_test_tensor(size=size, is_float=True)
            encrypted_tensor1 = crypten.cryptensor(tensor1)
            tensor2 = get_random_test_tensor(size=size, is_float=True)
            encrypted_tensor2 = y_type(tensor2)

            condition_tensor = (
                get_random_test_tensor(max_value=1, size=size, is_float=False) + 1
            )
            condition_encrypted = crypten.cryptensor(condition_tensor)
            condition_bool = condition_tensor.bool()

            reference_out = torch.where(condition_bool, tensor1, tensor2)

            encrypted_out = crypten.where(
                condition_bool, encrypted_tensor1, encrypted_tensor2
            )

            y_is_private = crypten.is_encrypted_tensor(encrypted_tensor2)
            self._check(
                encrypted_out,
                reference_out,
                f"{'private' if y_is_private else 'public'} y "
                "where failed with public condition",
            )

            encrypted_out = encrypted_tensor1.where(
                condition_encrypted, encrypted_tensor2
            )
            self._check(
                encrypted_out,
                reference_out,
                f"{'private' if y_is_private else 'public'} y "
                "where failed with private condition",
            )
Example No. 11
    def test_where(self):
        """Tests where() conditional element selection"""
        sizes = [(10,), (5, 10), (1, 5, 10)]
        y_types = [lambda x: x, BinarySharedTensor]

        for size, y_type in itertools.product(sizes, y_types):
            tensor1 = get_random_test_tensor(size=size, is_float=False)
            encrypted_tensor1 = BinarySharedTensor(tensor1)
            tensor2 = get_random_test_tensor(size=size, is_float=False)
            encrypted_tensor2 = y_type(tensor2)

            condition_tensor = (
                get_random_test_tensor(max_value=1, size=[1], is_float=False) + 1
            )
            condition_encrypted = BinarySharedTensor(condition_tensor)
            condition_bool = condition_tensor.bool()

            reference_out = tensor1 * condition_tensor + tensor2 * (
                1 - condition_tensor
            )

            encrypted_out = encrypted_tensor1.where(condition_bool, encrypted_tensor2)

            y_is_private = y_type == BinarySharedTensor
            self._check(
                encrypted_out,
                reference_out,
                f"{'private' if y_is_private else 'public'} y "
                "where failed with public condition",
            )

            encrypted_out = encrypted_tensor1.where(
                condition_encrypted, encrypted_tensor2
            )
            self._check(
                encrypted_out,
                reference_out,
                f"{'private' if y_is_private else 'public'} y "
                "where failed with private condition",
            )
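
Both tests above lean on the arithmetic identity behind where(): select x wherever the condition is 1 and y wherever it is 0. A plain-PyTorch sketch of that identity with illustrative values:

    # Illustrative sketch, not from the test suite.
    import torch

    condition = torch.tensor([1, 0, 1])  # 1 selects from x, 0 selects from y
    x = torch.tensor([10, 20, 30])
    y = torch.tensor([-1, -2, -3])

    selected = x * condition + y * (1 - condition)
    assert torch.equal(selected, torch.where(condition.bool(), x, y))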
Example No. 12
    def _conv2d(self, image_size, in_channels):
        """Test convolution of encrypted tensor with public/private tensors."""
        nbatches = [1, 3]
        kernel_sizes = [(1, 1), (2, 2), (2, 3)]
        ochannels = [1, 3]
        paddings = [0, 1, (0, 1)]
        strides = [1, 2, (1, 2)]
        dilations = [1, 2, (1, 2)]
        groupings = [1, 2]

        for (
            batches,
            kernel_size,
            out_channels,
            padding,
            stride,
            dilation,
            groups,
        ) in itertools.product(
            nbatches, kernel_sizes, ochannels, paddings, strides, dilations, groupings
        ):
            # TODO: Fix conv2d gradient in this case:
            if in_channels > 1 and groups > 1:
                continue

            size = (batches, in_channels * groups, *image_size)
            image = get_random_test_tensor(size=size, is_float=True)

            kernel_size = (out_channels * groups, in_channels, *kernel_size)
            kernel = get_random_test_tensor(size=kernel_size, is_float=True)

            self._check_forward_backward(
                "conv2d",
                image,
                kernel,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
            )
Example No. 13
    def test_gather_scatter(self):
        """Tests gather and scatter gradients"""
        sizes = [(2, 2), (3, 5), (3, 5, 10)]
        indices = [[0, 1, 0, 0], [0, 1, 0, 0, 1] * 3, [0, 0, 1] * 50]
        dims = [0, 1]
        funcs = ["scatter", "gather"]

        for dim, func in itertools.product(dims, funcs):
            for size, index in zip(sizes, indices):
                tensor = get_random_test_tensor(size=size, is_float=True)
                index = torch.tensor(index).reshape(tensor.shape)

                tensor.requires_grad = True
                tensor_encr = AutogradCrypTensor(crypten.cryptensor(tensor),
                                                 requires_grad=True)

                if func == "gather":
                    reference = getattr(tensor, func)(dim, index)
                    out_encr = getattr(tensor_encr, func)(dim, index)
                else:
                    src = get_random_test_tensor(size=index.shape,
                                                 is_float=True)
                    reference = getattr(tensor, func)(dim, index, src)
                    out_encr = getattr(tensor_encr, func)(dim, index, src)

                self._check(out_encr, reference,
                            f"{func} forward failed with index {index}")

                grad_out = get_random_test_tensor(size=reference.shape,
                                                  is_float=True)
                grad_out_encr = crypten.cryptensor(grad_out)
                reference.backward(grad_out)
                out_encr.backward(grad_out_encr)

                self._check(
                    tensor_encr.grad,
                    tensor.grad,
                    f"{func} backward failed with index {index}",
                )
Example No. 14
    def test_torch_scatter(self):
        """ Test scatter/scatter_add function of CUDALongTensor

            This test will be skipped for now since torch.scatter provides
            inconsistent result given the same input on CUDA. This is likely
            due to a potential bug on pytorch's implementation of scatter
        """

        funcs = ["scatter", "scatter_add"]
        sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
        for func in funcs:
            for size in sizes:
                for dim in range(len(size)):
                    tensor1 = get_random_test_tensor(size=size, is_float=False)
                    tensor2 = get_random_test_tensor(size=size, is_float=False)
                    index = get_random_test_tensor(size=size, is_float=False)
                    index = index.abs().clamp(0, 4)

                    t1_cuda = CUDALongTensor(tensor1)
                    t2_cuda = CUDALongTensor(tensor2)
                    idx_cuda = CUDALongTensor(index)
                    reference = getattr(torch, func)(tensor1, dim, index,
                                                     tensor2)
                    result = getattr(torch, func)(t1_cuda, dim, idx_cuda,
                                                  t2_cuda)
                    result2 = getattr(t1_cuda, func)(dim, idx_cuda, t2_cuda)

                    self.assertTrue(
                        type(result) == CUDALongTensor,
                        "result should be a CUDALongTensor",
                    )
                    self.assertTrue(
                        type(result2) == CUDALongTensor,
                        "result should be a CUDALongTensor",
                    )
                    self._check_int(result.cpu(), reference,
                                    "{} failed".format(func))
                    self._check_int(result2.cpu(), reference,
                                    "{} failed".format(func))
Example No. 15
    def test_index_add(self):
        """Test index_add function of encrypted tensor"""
        index_add_functions = ["index_add", "index_add_"]
        tensor_size1 = [5, 5, 5, 5]
        index = torch.tensor([1, 2, 3, 4, 4, 2, 1, 3], dtype=torch.long)
        for dimension in range(0, 4):
            tensor_size2 = [5, 5, 5, 5]
            tensor_size2[dimension] = index.size(0)
            for func in index_add_functions:
                for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
                    tensor1 = get_random_test_tensor(size=tensor_size1, is_float=True)
                    tensor2 = get_random_test_tensor(size=tensor_size2, is_float=True)
                    encrypted = ArithmeticSharedTensor(tensor1)
                    encrypted2 = tensor_type(tensor2)

                    reference = getattr(tensor1, func)(dimension, index, tensor2)
                    encrypted_out = getattr(encrypted, func)(
                        dimension, index, encrypted2
                    )
                    private = tensor_type == ArithmeticSharedTensor
                    self._check(
                        encrypted_out,
                        reference,
                        "%s %s failed" % ("private" if private else "public", func),
                    )
                    if func.endswith("_"):
                        # Check in-place index_add worked
                        self._check(
                            encrypted,
                            reference,
                            "%s %s failed" % ("private" if private else "public", func),
                        )
                    else:
                        # Check original is not modified
                        self._check(
                            encrypted,
                            tensor1,
                            "%s %s failed" % ("private" if private else "public", func),
                        )
Example No. 16
    def test_square(self):
        """Tests square function gradient.
        Note: torch pow(2) is used to verify gradient,
            since PyTorch does not implement square().
        """
        for size in SIZES:
            tensor = get_random_test_tensor(size=size, is_float=True)
            tensor.requires_grad = True
            tensor_encr = crypten.cryptensor(tensor, requires_grad=True)

            out = tensor.pow(2)
            out_encr = tensor_encr.square()
            self._check(out_encr, out, f"square forward failed with size {size}")

            grad_output = get_random_test_tensor(size=out.shape, is_float=True)
            out.backward(grad_output)
            out_encr.backward(crypten.cryptensor(grad_output))
            self._check(
                tensor_encr.grad,
                tensor.grad,
                f"square backward failed with size {size}",
            )
Example No. 17
    def test_prod(self):
        """Tests prod reduction on encrypted tensor."""
        # Increasing size to reduce relative error due to quantization
        tensor = get_random_test_tensor(size=(5, 5, 5), is_float=False)
        encrypted = ArithmeticSharedTensor(tensor)
        self._check(encrypted.prod(), tensor.prod().float(), "prod failed")

        for dim in [0, 1, 2]:
            reference = tensor.prod(dim).float()
            with self.benchmark(type="prod", dim=dim) as bench:
                for _ in bench.iters:
                    encrypted_out = encrypted.prod(dim)
            self._check(encrypted_out, reference, "prod failed")
Example No. 18
    def test_view_reshape(self):
        """Tests view and reshape gradients"""
        size_to_views = {
            (10, ): [(5, 2), (1, 10)],
            (10, 5): [(50), (2, 5, 5)],
            (5, 10, 8): [(400), (50, 8), (5, 5, 2, 8)],
        }

        for size in size_to_views:
            for view in size_to_views[size]:
                tensor = get_random_test_tensor(size=size, is_float=True)
                self._check_forward_backward("view", tensor, view)
                self._check_forward_backward("reshape", tensor, view)
Example No. 19
    def test_torch_arithmetic(self):
        """Test torch arithmetic on CUDALongTensor"""
        funcs = ["add", "sub", "mul", "div"]
        a = get_random_test_tensor(is_float=False)
        b = get_random_test_tensor(min_value=1, is_float=False)

        a_cuda = CUDALongTensor(a)
        b_cuda = CUDALongTensor(b)

        for op in funcs:
            reference = getattr(torch, op)(a, b)
            result = getattr(torch, op)(a_cuda, b_cuda)
            result2 = getattr(a_cuda, op)(b_cuda)

            self.assertTrue(
                type(result) == CUDALongTensor, "result should be a CUDALongTensor"
            )
            self._check_int(reference, result.cpu(),
                            "torch.{} failed for CUDALongTensor".format(op))
            self._check_int(
                reference,
                result2.cpu(),
                "torch.{} failed for CUDALongTensor".format(op),
            )
Example No. 20
    def test_torch_gather(self):
        """Test torch.gather on CUDALongTensor"""
        sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
        for size in sizes:
            for dim in range(len(size)):
                tensor = get_random_test_tensor(size=size, is_float=False)
                index = get_random_test_tensor(size=size, is_float=False)
                index = index.abs().clamp(0, 4)

                t_cuda = CUDALongTensor(tensor)
                idx_cuda = CUDALongTensor(index)

                reference = tensor.gather(dim, index)
                result = t_cuda.gather(dim, idx_cuda)
                result2 = torch.gather(t_cuda, dim, idx_cuda)

                self._check_int(
                    result.cpu(), reference, f"gather failed with size {size}"
                )
                self._check_int(
                    result2.cpu(), reference, f"gather failed with size {size}"
                )
Example No. 21
 def test_scatter(self):
     """Test scatter/scatter_add function of encrypted tensor"""
     funcs = ["scatter", "scatter_", "scatter_add", "scatter_add_"]
     sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
     for func in funcs:
         for size in sizes:
             for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
                 for dim in range(len(size)):
                     tensor1 = get_random_test_tensor(size=size, is_float=True)
                     tensor2 = get_random_test_tensor(size=size, is_float=True)
                     index = get_random_test_tensor(size=size, is_float=False)
                     index = index.abs().clamp(0, 4)
                     encrypted = ArithmeticSharedTensor(tensor1)
                     encrypted2 = tensor_type(tensor2)
                     reference = getattr(tensor1, func)(dim, index, tensor2)
                     encrypted_out = getattr(encrypted, func)(dim, index, encrypted2)
                     private = tensor_type == ArithmeticSharedTensor
                     self._check(
                         encrypted_out,
                         reference,
                         "%s %s failed" % ("private" if private else "public", func),
                     )
                     if func.endswith("_"):
                         # Check in-place scatter/scatter-add modified input
                         self._check(
                             encrypted,
                             reference,
                             "%s %s failed to modify input"
                             % ("private" if private else "public", func),
                         )
                     else:
                         # Check original is not modified
                         self._check(
                             encrypted,
                             tensor1,
                             "%s %s unintendedly modified input"
                             % ("private" if private else "public", func),
                         )
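
As a reminder of the scatter semantics exercised above (for dim=0, out[index[i][j]][j] = src[i][j]), here is a small plain-PyTorch sketch with illustrative values:

    # Illustrative sketch, not from the test suite.
    import torch

    base = torch.zeros(3, 4, dtype=torch.long)
    src = torch.arange(8).reshape(2, 4)
    index = torch.tensor([[0, 1, 2, 0],
                          [2, 0, 1, 1]])

    out = base.scatter(0, index, src)  # out-of-place; scatter_ would modify base
    # e.g. out[2, 0] == src[1, 0] == 4 because index[1][0] == 2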
Example No. 22
    def _conv1d(self, signal_size, in_channels):
        """Test convolution of encrypted tensor with public/private tensors."""
        nbatches = [1, 3]
        kernel_sizes = [1, 2, 3]
        ochannels = [1, 3, 6]
        paddings = [0, 1]
        strides = [1, 2]

        for func_name in ["conv1d", "conv_transpose1d"]:
            for kernel_type in [lambda x: x, ArithmeticSharedTensor]:
                for (
                    batches,
                    kernel_size,
                    out_channels,
                    padding,
                    stride,
                ) in itertools.product(
                    nbatches, kernel_sizes, ochannels, paddings, strides
                ):
                    input_size = (batches, in_channels, signal_size)
                    signal = get_random_test_tensor(size=input_size, is_float=True)

                    if func_name == "conv1d":
                        k_size = (out_channels, in_channels, kernel_size)
                    else:
                        k_size = (in_channels, out_channels, kernel_size)
                    kernel = get_random_test_tensor(size=k_size, is_float=True)

                    reference = getattr(F, func_name)(
                        signal, kernel, padding=padding, stride=stride
                    )
                    encrypted_signal = ArithmeticSharedTensor(signal)
                    encrypted_kernel = kernel_type(kernel)
                    encrypted_conv = getattr(encrypted_signal, func_name)(
                        encrypted_kernel, padding=padding, stride=stride
                    )

                    self._check(encrypted_conv, reference, f"{func_name} failed")
Example No. 23
    def test_autograd_func_take(self):
        """Tests the part of autograd take that does not have a torch equivalent"""
        tensor_size = [5, 5, 5, 5]
        index = torch.tensor([[[1, 2], [3, 4]], [[4, 2], [1, 3]]],
                             dtype=torch.long)

        # Test when dimension!=None
        for dimension in range(0, 4):
            tensor = get_random_test_tensor(size=tensor_size, is_float=True)
            ref_forward = torch.from_numpy(tensor.numpy().take(
                index, dimension))
            encrypted_tensor = crypten.cryptensor(tensor)
            encr_inputs = [encrypted_tensor, index, dimension]

            # test forward
            ctx = AutogradContext()
            grad_fn_take = gradients.get_grad_fn("take")
            encr_output = grad_fn_take.forward(ctx, encr_inputs)
            self._check(encr_output, ref_forward,
                        "take forward failed: dimension set")

            # test backward:
            # first, recreate take forward function with only torch operations
            tensor2 = get_random_test_tensor(size=tensor_size, is_float=True)
            tensor2.requires_grad = True
            all_indices = [slice(0, x) for x in tensor2.size()]
            all_indices[dimension] = index
            ref_forward_torch = tensor2[all_indices]
            grad_output = torch.ones(ref_forward_torch.size())
            ref_forward_torch.backward(grad_output)

            # next, do backward pass on encrypted tensor
            encr_grad_output = encr_output.new(grad_output)
            encr_grad = grad_fn_take.backward(ctx, encr_grad_output)

            # finally, compare values
            self._check(encr_grad, tensor2.grad,
                        "take backward failed: dimension set")
Example No. 24
    def test_get_set(self):
        for tensor_type in [lambda x: x, BinarySharedTensor]:
            for size in range(1, 5):
                # Test __getitem__
                tensor = get_random_test_tensor(size=(size, size), is_float=False)
                reference = tensor[:, 0]

                encrypted_tensor = BinarySharedTensor(tensor)
                encrypted_out = encrypted_tensor[:, 0]
                self._check(encrypted_out, reference, "getitem failed")

                reference = tensor[0, :]
                encrypted_out = encrypted_tensor[0, :]
                self._check(encrypted_out, reference, "getitem failed")

                # Test __setitem__
                tensor2 = get_random_test_tensor(size=(size,), is_float=False)
                reference = tensor.clone()
                reference[:, 0] = tensor2

                encrypted_out = BinarySharedTensor(tensor)
                encrypted2 = tensor_type(tensor2)
                encrypted_out[:, 0] = encrypted2

                self._check(
                    encrypted_out, reference, "%s setitem failed" % type(encrypted2)
                )

                reference = tensor.clone()
                reference[0, :] = tensor2

                encrypted_out = BinarySharedTensor(tensor)
                encrypted2 = tensor_type(tensor2)
                encrypted_out[0, :] = encrypted2

                self._check(
                    encrypted_out, reference, "%s setitem failed" % type(encrypted2)
                )
Example No. 25
    def _check_forward_backward(
        self, func_name, input_tensor, *args, msg=None, **kwargs
    ):
        """Checks forward and backward against PyTorch

        Args:
            func_name (str): PyTorch/CrypTen function name
            input_tensor (torch.tensor): primary input
            args (list): positional arguments for the function
            msg (str): additional message for mismatch
            kwargs (dict): keyword arguments for the function
        """

        if msg is None:
            msg = f"{func_name} grad_fn incorrect"

        input = input_tensor.clone()
        input.requires_grad = True
        input_encr = AutogradCrypTensor(crypten.cryptensor(input), requires_grad=True)

        for private in [False, True]:
            input.grad = None
            input_encr.grad = None
            args = self._set_grad_to_zero(args)
            args_encr = self._set_grad_to_zero(list(args), make_private=private)

            # check forward pass
            torch_func = self._get_torch_func(func_name)
            reference = torch_func(input, *args, **kwargs)
            encrypted_out = getattr(input_encr, func_name)(*args_encr, **kwargs)

            # extract argmax output for max / min with keepdim=False
            if isinstance(encrypted_out, (list, tuple)):
                reference = reference[0]
                encrypted_out = encrypted_out[0]

            self._check(encrypted_out, reference, msg + " in forward")

            # check backward pass
            grad_output = get_random_test_tensor(
                max_value=2, size=reference.size(), is_float=True
            )
            grad_output_encr = crypten.cryptensor(grad_output)
            reference.backward(grad_output)
            encrypted_out.backward(grad_output_encr)

            self._check(input_encr.grad, input.grad, msg + " in backward")
            for i, arg_encr in enumerate(args_encr):
                if crypten.is_encrypted_tensor(arg_encr):
                    self._check(arg_encr.grad, args[i].grad, msg + " in backward args")
Example No. 26
    def _conv1d(self, signal_size, in_channels):
        """Test convolution of encrypted tensor with public/private tensors."""
        nbatches = [1, 3]
        nout_channels = [1, 5]
        kernel_sizes = [1, 2, 3]
        paddings = [0, 1]
        strides = [1, 2]

        for batches in nbatches:
            size = (batches, in_channels, signal_size)
            signal = get_random_test_tensor(size=size, is_float=True)

            for kernel_size, out_channels in itertools.product(
                kernel_sizes, nout_channels
            ):
                kernel_size = (out_channels, in_channels, kernel_size)
                kernel = get_random_test_tensor(size=kernel_size, is_float=True)

                for padding in paddings:
                    for stride in strides:
                        self._check_forward_backward(
                            "conv1d", signal, kernel, stride=stride, padding=padding
                        )
Example No. 27
    def test_pos_pow(self):
        """Test gradient crypten pos_pow"""
        for power in [3, -2, 1.75]:
            # ensure base is positive for pos_pow
            tensor = get_random_test_tensor(is_float=True, max_value=2) + 4
            tensor.requires_grad = True
            tensor_encr = crypten.cryptensor(tensor, requires_grad=True)

            reference = tensor.pow(power)
            out_encr = tensor_encr.pos_pow(power)
            self._check(out_encr, reference,
                        f"pos_pow forward failed with power {power}")

            grad_out = get_random_test_tensor(is_float=True)
            grad_out_encr = crypten.cryptensor(grad_out)
            reference.backward(grad_out)
            out_encr.backward(grad_out_encr)

            self._check(
                tensor_encr.grad,
                tensor.grad,
                f"pos_pow backward failed with power {power}",
            )
Example No. 28
    def test_patched_matmul(self):
        """Test torch.matmul on CUDALongTensor"""
        input_sizes = [
            (5,),
            (5, 5),
            (5,),
            (5, 5),
            (5, 5, 5),
            (5,),
            (5, 5, 5, 5),
            (5, 5),
        ]
        other_sizes = [
            (5,),
            (5, 5),
            (5, 5),
            (5,),
            (5,),
            (5, 5, 5),
            (5, 5),
            (5, 5, 5, 5),
        ]

        for x_size, y_size in zip(input_sizes, other_sizes):
            x = get_random_test_tensor(size=x_size, max_value=2 ** 62, is_float=False)
            x_cuda = CUDALongTensor(x)

            y = get_random_test_tensor(size=y_size, max_value=2 ** 62, is_float=False)
            y_cuda = CUDALongTensor(y)

            z = torch.matmul(x_cuda, y_cuda)
            self.assertTrue(
                type(z) == CUDALongTensor, "result should be a CUDALongTensor"
            )

            reference = torch.matmul(x, y)
            self._check_int(z.cpu(), reference, "matmul failed for cuda_patches")
Example No. 29
    def test_save_load(self):
        """Test that crypten.save and crypten.load properly save and load
        shares of cryptensors"""
        import io
        import pickle

        def custom_load_function(f):
            obj = pickle.load(f)
            return obj

        def custom_save_function(obj, f):
            pickle.dump(obj, f)

        all_save_fns = [torch.save, custom_save_function]
        all_load_fns = [torch.load, custom_load_function]

        tensor = get_random_test_tensor()
        cryptensor1 = crypten.cryptensor(tensor)

        for i, save_closure in enumerate(all_save_fns):
            load_closure = all_load_fns[i]
            f = [
                io.BytesIO() for i in range(crypten.communicator.get().get_world_size())
            ]
            crypten.save(cryptensor1, f[self.rank], save_closure=save_closure)
            f[self.rank].seek(0)
            cryptensor2 = crypten.load(f[self.rank], load_closure=load_closure)
            # test whether share matches
            self.assertTrue(cryptensor1.share.allclose(cryptensor2.share))
            # test whether tensor matches
            self.assertTrue(
                cryptensor1.get_plain_text().allclose(cryptensor2.get_plain_text())
            )
            attributes = [
                a
                for a in dir(cryptensor1)
                if not a.startswith("__")
                and not callable(getattr(cryptensor1, a))
                and a not in ["share", "_tensor", "ctx"]
            ]
            for a in attributes:
                attr1, attr2 = getattr(cryptensor1, a), getattr(cryptensor2, a)
                if a == "encoder":
                    self.assertTrue(attr1._scale == attr2._scale)
                    self.assertTrue(attr1._precision_bits == attr2._precision_bits)
                elif torch.is_tensor(attr1):
                    self.assertTrue(attr1.eq(attr2).all())
                else:
                    self.assertTrue(attr1 == attr2)
Example No. 30
    def _check_max_pool2d_forward_backward(self,
                                           image,
                                           kernel_size,
                                           padding,
                                           stride,
                                           tol=0.1):
        """Checks forward and backward are for max pool 2d.
        Verifies gradients by checking sum of non-matching elements to account for
        differences in tie resolution in max between PyTorch and CrypTen:
        PyTorch returns smallest index for max entries,
        whereas CrypTen returns a random index.

        Args:
            image (torch.tensor): input
            kernel_size (tuple of ints): size of the window over which to compute max
            padding (int or tuple of ints): implicit zero padding to be added on both sides
            stride (int or tuple of ints): the stride of the window
        """
        # check forward
        image = image.clone()
        image.requires_grad = True
        image_enc = crypten.cryptensor(image, requires_grad=True)

        out = torch.nn.functional.max_pool2d(image,
                                             kernel_size,
                                             padding=padding,
                                             stride=stride)
        out_enc = image_enc.max_pool2d(kernel_size,
                                       padding=padding,
                                       stride=stride)
        self._check(out_enc, out, "max_pool2d forward incorrect")

        # check backward
        grad_output = get_random_test_tensor(size=out.size(), is_float=True)
        grad_output_enc = crypten.cryptensor(grad_output)
        out.backward(grad_output)
        out_enc.backward(grad_output_enc)

        # check sum of non-matching gradient entries
        crypten_grad = image_enc.grad.get_plain_text()
        non_matching_indices = (image.grad - crypten_grad).abs() > tol
        sum_is_close = (crypten_grad[non_matching_indices].sum() -
                        image.grad[non_matching_indices].sum()) < tol
        if not sum_is_close:
            msg = "max_pool2d backward failed"
            logging.info(msg)
            logging.info(f"Result: crypten image gradient {crypten_grad}")
            logging.info(f"Result - Reference {image.grad - crypten_grad}")
            self.assertTrue(sum_is_close, msg=msg)