Example #1
def spdz_mul(cmd: Callable, x_sh, y_sh, crypto_provider: AbstractWorker,
             field: int, dtype: str):
    """Abstractly multiplies two tensors (mul or matmul)

    Args:
        cmd: a callable that performs the operation to compute (mul or matmul)
        x_sh (AdditiveSharingTensor): the left part of the operation
        y_sh (AdditiveSharingTensor): the right part of the operation
        crypto_provider (AbstractWorker): an AbstractWorker which is used to generate triples
        field (int): an integer denoting the size of the field
        dtype (str): denotes the dtype of shares

    Return:
        an AdditiveSharingTensor
    """
    assert isinstance(x_sh, sy.AdditiveSharingTensor)
    assert isinstance(y_sh, sy.AdditiveSharingTensor)

    locations = x_sh.locations
    torch_dtype = x_sh.torch_dtype

    # Get triples
    a, b, a_mul_b = request_triple(crypto_provider, cmd, field, dtype,
                                   x_sh.shape, y_sh.shape, locations)

    delta = x_sh - a
    epsilon = y_sh - b
    # Reconstruct and send to all workers
    delta = delta.reconstruct()
    epsilon = epsilon.reconstruct()

    delta_epsilon = cmd(delta, epsilon)

    # Trick to keep only one child in the MultiPointerTensor (as in SecureNN)
    j1 = torch.ones(delta_epsilon.shape).type(torch_dtype).send(
        locations[0], **no_wrap)
    j0 = torch.zeros(delta_epsilon.shape).type(torch_dtype).send(
        *locations[1:], **no_wrap)
    if len(locations) == 2:
        j = sy.MultiPointerTensor(children=[j1, j0])
    else:
        j = sy.MultiPointerTensor(children=[j1] + list(j0.child.values()))

    delta_b = cmd(delta, b)
    a_epsilon = cmd(a, epsilon)
    res = delta_epsilon * j + delta_b + a_epsilon + a_mul_b
    res = res.type(torch_dtype)
    return res
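A minimal plaintext sketch (not part of the example above) of the Beaver-triple identity that spdz_mul relies on: with delta = x - a and epsilon = y - b, the masked products recombine to x * y in the field.

import random

field = 2 ** 64
for _ in range(1000):
    x, y, a, b = (random.randrange(field) for _ in range(4))
    a_mul_b = (a * b) % field
    delta, epsilon = (x - a) % field, (y - b) % field
    # delta*epsilon + delta*b + a*epsilon + a*b == x*y  (mod field)
    res = (delta * epsilon + delta * b + a * epsilon + a_mul_b) % field
    assert res == (x * y) % field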
Example #2
def maxpool_deriv(x_sh):
    """Compute derivative of MaxPool

    Args:
        x_sh (AdditiveSharingTensor): the private tensor on which the op applies

    Returns:
        an AdditiveSharingTensor of the same shape as x_sh full of zeros except for
        a 1 at the position of the max value
    """
    if x_sh.dtype == "custom":
        raise TypeError(
            "`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
        )

    workers = x_sh.locations
    crypto_provider = x_sh.crypto_provider
    L = x_sh.field
    dtype = get_dtype(L)
    torch_dtype = get_torch_dtype(L)

    n1, n2 = x_sh.shape
    n = n1 * n2
    if L % n != 0:
        raise ValueError("Check x_sh.field and x_sh.shape ")
    x_sh = x_sh.view(-1)

    # Common Randomness
    U_sh = _shares_of_zero(n, L, dtype, crypto_provider, *workers)

    r = _random_common_value(L, *workers)

    # 1)
    _, ind_max_sh = maxpool(x_sh)

    # 2)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([int(i == 0)]).send(w, **no_wrap)
        for i, w in enumerate(workers)
    ])
    k_sh = ind_max_sh + j * r

    # 3)
    t = k_sh.get()
    k = t % n
    E_k = torch.zeros(n, dtype=torch_dtype)
    E_k[k] = 1
    E_sh = E_k.share(*workers, field=L, dtype=dtype, **no_wrap)

    # 4)
    g = r % n
    D_sh = torch.roll(E_sh, -g)

    maxpool_d_sh = D_sh + U_sh
    return maxpool_d_sh.view(n1, n2)
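A plaintext sketch of the masking trick in steps 2-4 (local torch only, no shares): revealing k = (argmax + r) mod n leaks nothing about the argmax, and rolling the one-hot vector E_k by -(r mod n) recovers a one-hot at the argmax position.

import torch

n, ind_max, r = 12, 7, 123456789   # illustrative values
k = (ind_max + r) % n
E_k = torch.zeros(n)
E_k[k] = 1
D = torch.roll(E_k, -(r % n))      # step 4 of the protocol, on plain tensors
assert int(D.argmax()) == ind_max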
Example #3
def _random_common_value(max_value, *workers):
    """
    Return n in [0, max_value-1] chosen by a worker and sent to all workers,
    in the form of a MultiPointerTensor
    """
    pointer = torch.LongTensor([1]).send(workers[0]).random_(max_value)
    pointers = [pointer]
    for worker in workers[1:]:
        pointers.append(pointer.copy().move(worker))
    common_value = sy.MultiPointerTensor(children=pointers)

    return common_value
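A hedged usage sketch, assuming PySyft 0.2.x with VirtualWorkers (the worker names below are illustrative, not taken from the example):

import torch
import syft as sy

hook = sy.TorchHook(torch)
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")

r = _random_common_value(2 ** 62, alice, bob)   # same random value held by both workers
assert isinstance(r, sy.MultiPointerTensor)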
Example #4
def _random_common_bit(*workers):
    """
    Return a bit chosen by a worker and sent to all workers,
    in the form of a MultiPointerTensor
    """
    pointer = torch.tensor([1]).send(workers[0], **no_wrap).random_(2)
    pointers = [pointer]
    for worker in workers[1:]:
        pointers.append(pointer.copy().move(worker))
    bit = sy.MultiPointerTensor(children=pointers)

    return bit
Example #5
def maxpool_deriv(x_sh):
    """ Compute derivative of MaxPool

    Args:
        x_sh (AdditiveSharingTensor): the private tensor on which the op applies

    Returns:
        an AdditiveSharingTensor of the same shape as x_sh full of zeros except for
        a 1 at the position of the max value
    """
    assert (
        x_sh.dtype != "custom"
    ), "`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"

    alice, bob = x_sh.locations
    crypto_provider = x_sh.crypto_provider
    L = x_sh.field
    dtype = get_dtype(L)
    torch_dtype = get_torch_dtype(L)

    n1, n2 = x_sh.shape
    n = n1 * n2
    x_sh = x_sh.view(-1)

    # Common Randomness
    U_sh = _shares_of_zero(n, L, dtype, crypto_provider, alice, bob)

    r = _random_common_value(L, alice, bob)

    # 1)
    _, ind_max_sh = maxpool(x_sh)

    # 2)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([1]).send(alice, **no_wrap),
        torch.tensor([0]).send(bob, **no_wrap)
    ])
    k_sh = ind_max_sh + j * r

    # 3)
    t = k_sh.get()
    k = t % n
    E_k = torch.zeros(n, dtype=torch_dtype)
    E_k[k] = 1
    E_sh = E_k.share(alice, bob, field=L, dtype=dtype, **no_wrap)

    # 4)
    g = r % n
    D_sh = torch.roll(E_sh, -g)

    maxpool_d_sh = D_sh + U_sh
    return maxpool_d_sh.view(n1, n2)
Example #6
def test_xor_implementation(workers):
    alice, bob, james = workers["alice"], workers["bob"], workers["james"]
    r = decompose(th.tensor([3])).send(alice, bob).child
    x_bit_sh = decompose(th.tensor([23])).share(alice, bob, crypto_provider=james).child
    j0 = torch.zeros(x_bit_sh.shape).long().send(bob)
    j1 = torch.ones(x_bit_sh.shape).long().send(alice)
    j = syft.MultiPointerTensor(children=[j0, j1])
    w = (j * r) + x_bit_sh - (2 * x_bit_sh * r)

    r_real = r.virtual_get()[0]
    x_real = x_bit_sh.virtual_get()
    w_real = r_real + x_real - 2 * r_real * x_real
    assert (w.virtual_get() == w_real).all()
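The arithmetic XOR identity that the test relies on, checked on plain bits (no workers needed):

for r in (0, 1):
    for x in (0, 1):
        # j*r + x - 2*x*r reduces to r + x - 2*r*x on plain values, which is r XOR x
        assert r + x - 2 * r * x == r ^ x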
Example #7
def _random_common_value(max_value, *workers):
    """
    Return n in [0, max_value-1] chosen by a worker and sent to all workers,
    in the form of a MultiPointerTensor
    """
    torch_dtype = get_torch_dtype(max_value)
    pointer = (torch.tensor([1], dtype=torch_dtype).send(
        workers[0], **no_wrap).random_(1, get_max_val_field(max_value)))
    pointers = [pointer]
    for worker in workers[1:]:
        pointers.append(pointer.copy().move(worker))
    common_value = sy.MultiPointerTensor(children=pointers)

    return common_value
Example #8
    def reconstruct(self):
        """
        Reconstruct the shares of the AdditiveSharingTensor remotely without
        its owner being able to see any sensitive value

        Returns:
            A MultiPointerTensor where all workers hold the reconstructed value
        """
        workers = self.locations

        ptr_to_sh = self.copy().wrap().send(workers[0], **no_wrap)
        pointer = ptr_to_sh.remote_get()

        pointers = [pointer] + [pointer.copy().move(w) for w in workers[1:]]

        return sy.MultiPointerTensor(children=pointers)
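A plaintext picture of what reconstruct() reveals (plain integers standing in for the remote shares): additive shares sum to the secret modulo the field, so gathering them on one worker recovers the value, which is then copied to every location.

import random

field = 2 ** 32
secret = 123456
share0 = random.randrange(field)
share1 = (secret - share0) % field
assert (share0 + share1) % field == secret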
Example #9
def maxpool_deriv(x_sh):
    """ Compute derivative of MaxPool

    Args:
        x_sh (AdditiveSharingTensor): the private tensor on which the op applies

    Returns:
        an AdditiveSharingTensor of the same shape as x_sh full of zeros except for
        a 1 at the position of the max value
    """
    alice, bob = x_sh.locations
    crypto_provider = x_sh.crypto_provider
    L = x_sh.field

    n1, n2 = x_sh.shape
    n = n1 * n2
    x_sh = x_sh.view(-1)

    # Common Randomness
    U_sh = _shares_of_zero(n, L, crypto_provider, alice, bob)
    r = _random_common_value(L, alice, bob)

    # 1)
    _, ind_max_sh = maxpool(x_sh)

    # 2)
    j = sy.MultiPointerTensor(
        children=[torch.tensor([1]).send(alice),
                  torch.tensor([0]).send(bob)])
    k_sh = ind_max_sh + j * r

    # 3)
    t = k_sh.get()
    k = t % n
    E_k = torch.zeros(n)
    E_k[k] = 1
    E_sh = E_k.share(alice, bob).child

    # 4)
    g = r % n
    D_sh = torch.roll(E_sh, -g)

    maxpool_d_sh = D_sh + U_sh
    return maxpool_d_sh.view(n1, n2)
Example #10
def relu_deriv(a_sh):
    """
    Compute the derivative of Relu

    Args:
        a_sh (AdditiveSharingTensor): the private tensor on which the op applies

    Returns:
        0 if Dec(a_sh) < 0
        1 if Dec(a_sh) > 0
        encrypted in an AdditiveSharingTensor
    """
    if a_sh.dtype == "custom":
        raise TypeError(
            "`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
        )

    workers = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field
    dtype = get_dtype(L)
    torch_dtype = get_torch_dtype(L)
    # Common randomness
    u = _shares_of_zero(1, L, dtype, crypto_provider, *workers)

    # 1)
    y_sh = a_sh * 2

    # 2) Not applicable with algebraic shares
    y_sh = share_convert(y_sh)

    # 3)
    alpha_sh = msb(y_sh)

    # 4)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([int(i == 0)], dtype=torch_dtype).send(w, **no_wrap)
        for i, w in enumerate(workers)
    ])
    gamma_sh = j - alpha_sh + u
    return gamma_sh
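A plaintext sketch of what relu_deriv computes, assuming the usual signed interpretation of the shares: 1 minus the sign bit, i.e. 0 for negative inputs and 1 otherwise.

import torch

a = torch.tensor([-7, -1, 0, 2, 9])
alpha = (a < 0).long()        # the MSB that msb(y_sh) extracts in step 3
gamma = 1 - alpha             # step 4 on plain values (j - alpha)
assert torch.equal(gamma, (a >= 0).long())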
Example #11
    def detail(worker: AbstractWorker, tensor_tuple: tuple) -> "MultiPointerTensor":
        """
        This function reconstructs a MultiPointerTensor given its attributes in the form of a tuple.
        Args:
            worker: the worker doing the deserialization
            tensor_tuple: a tuple holding the attributes of the MultiPointerTensor
        Returns:
            MultiPointerTensor: a MultiPointerTensor
        Examples:
            multi_pointer_tensor = detail(worker, tensor_tuple)
        """

        tensor_id, chain = tensor_tuple

        tensor = sy.MultiPointerTensor(owner=worker, id=sy.serde._detail(worker, tensor_id))

        if chain is not None:
            chain = sy.serde._detail(worker, chain)
            tensor.child = chain

        return tensor
Example #12
def relu_deriv(a_sh):
    """
    Compute the derivative of Relu

    Args:
        a_sh (AdditiveSharingTensor): the private tensor on which the op applies

    Returns:
        0 if Dec(a_sh) < 0
        1 if Dec(a_sh) > 0
        encrypted in an AdditiveSharingTensor
    """
    assert (
        a_sh.dtype != "custom"
    ), "`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"

    alice, bob = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field
    dtype = get_dtype(L)
    # Common randomness
    u = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)

    # 1)
    y_sh = a_sh * 2

    # 2) Not applicable with algebraic shares
    y_sh = share_convert(y_sh)

    # 3)
    alpha_sh = msb(y_sh)

    # 4)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([0]).send(alice, **no_wrap),
        torch.tensor([1]).send(bob, **no_wrap)
    ])
    gamma_sh = j - alpha_sh + u
    return gamma_sh
Example #13
def relu_deriv(a_sh):
    """
    Compute the derivative of Relu

    Args:
        a_sh (AdditiveSharingTensor): the private tensor on which the op applies

    Returns:
        0 if Dec(a_sh) < 0
        1 if Dec(a_sh) > 0
        encrypted in an AdditiveSharingTensor
    """

    alice, bob = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field

    # Common randomness
    u = _shares_of_zero(1, L, crypto_provider, alice, bob)

    # 1)
    y_sh = a_sh * 2

    # 2) Not applicable with algebraic shares
    y_sh = share_convert(y_sh)
    # y_sh.field = L - 1

    # 3)
    alpha_sh = msb(y_sh)
    assert alpha_sh.field == L

    # 4)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([0]).send(alice, **no_wrap),
        torch.tensor([1]).send(bob, **no_wrap)
    ])
    gamma_sh = j - alpha_sh + u
    assert gamma_sh.field == L
    return gamma_sh
Example #14
def msb(a_sh, alice, bob):
    """
    Compute the most significant bit in a_sh

    args:
        a_sh (AdditiveSharingTensor): the tensor of study
        alice (AbstractWorker): 1st worker holding a private share of a_sh
        bob (AbstractWorker): 2nd worker holding a private share

    return:
        the most significant bit
    """

    crypto_provider = a_sh.crypto_provider
    L = a_sh.field

    input_shape = a_sh.shape
    a_sh = a_sh.view(-1)

    # the commented out numbers below correspond to the
    # line numbers in Table 5 of the SecureNN paper
    # https://eprint.iacr.org/2018/442.pdf

    # 1)
    x = torch.LongTensor(a_sh.shape).random_(L - 1)
    x_bit = decompose(x)
    x_sh = x.share(bob, alice, crypto_provider=crypto_provider).child
    # Get last column / value as decompose reverts bits: first one is in last position
    x_bit_0 = x_bit[..., -1:]
    x_bit_sh_0 = x_bit_0.share(bob, alice, crypto_provider=crypto_provider
                               ).child  # least -> greatest from left -> right
    x_bit_sh = x_bit.share(bob, alice, crypto_provider=crypto_provider).child

    # 2)
    y_sh = 2 * a_sh

    r_sh = y_sh + x_sh

    # 3)
    r = r_sh.get()  # .send(bob, alice)  # TODO: make this secure by exchanging shares remotely
    r_0 = decompose(r)[..., -1].send(bob, alice).child
    r = r.send(bob, alice).child

    assert isinstance(r, sy.MultiPointerTensor)

    j0 = torch.zeros(x_bit_sh.shape).long().send(bob)
    j1 = torch.ones(x_bit_sh.shape).long().send(alice)
    j = sy.MultiPointerTensor(children=[j0, j1])
    j_0 = j[..., -1]

    assert isinstance(j, sy.MultiPointerTensor)
    assert isinstance(j_0, sy.MultiPointerTensor)

    # 4)
    BETA = (torch.rand(a_sh.shape) > 0.5).long().send(bob, alice).child
    BETA_prime = private_compare(x_bit_sh,
                                 r,
                                 BETA=BETA,
                                 j=j,
                                 alice=alice,
                                 bob=bob,
                                 crypto_provider=crypto_provider).long()

    # 5)
    BETA_prime_sh = BETA_prime.share(bob,
                                     alice,
                                     crypto_provider=crypto_provider).child

    # 7)
    _lambda = BETA_prime_sh + (j_0 * BETA) - (2 * BETA * BETA_prime_sh)

    # 8)
    _delta = x_bit_sh_0.squeeze(-1) + (j_0 * r_0) - (2 * r_0 *
                                                     x_bit_sh_0.squeeze(-1))

    # 9)
    theta = _lambda * _delta

    # 10)
    u = (torch.zeros(list(theta.shape)).long().share(
        alice, bob, crypto_provider=crypto_provider).child)
    a = _lambda + _delta - (2 * theta) + u

    return a.view(*list(input_shape))
Example #15
def msb(a_sh):
    """
    Compute the most significant bit in a_sh; this is an implementation of the
    SecureNN paper https://eprint.iacr.org/2018/442.pdf

    Args:
        a_sh (AdditiveSharingTensor): the tensor of study
    Return:
        the most significant bit
    """

    workers = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field + 1  # field of a is L - 1
    dtype = get_dtype(L)
    input_shape = a_sh.shape
    a_sh = a_sh.view(-1)

    # the commented out numbers below correspond to the
    # line numbers in Table 5 of the SecureNN paper
    # https://eprint.iacr.org/2018/442.pdf

    # Common Randomness
    beta = _random_common_bit(*workers)
    u = _shares_of_zero(1, L, dtype, crypto_provider, *workers)

    # 1)
    x = torch.LongTensor(a_sh.shape).random_(get_max_val_field(L - 1))
    x_bit = decompose(x, L)
    x_sh = x.share(*workers,
                   field=L - 1,
                   dtype="custom",
                   crypto_provider=crypto_provider,
                   **no_wrap)
    x_bit_0 = x_bit[..., 0]
    x_bit_sh_0 = x_bit_0.share(*workers,
                               field=L,
                               crypto_provider=crypto_provider,
                               **no_wrap)
    x_bit_sh = x_bit.share(*workers,
                           field=p,
                           dtype="custom",
                           crypto_provider=crypto_provider,
                           **no_wrap)

    # 2)
    y_sh = a_sh * 2
    r_sh = y_sh + x_sh

    # 3)
    r = r_sh.reconstruct()  # convert the additive sharing into a MultiPointerTensor
    r_0 = decompose(r, L)[..., 0]

    # 4)
    beta_prime = private_compare(x_bit_sh, r, beta, L)

    # 5)
    beta_prime_sh = beta_prime.share(*workers,
                                     field=L,
                                     dtype=dtype,
                                     crypto_provider=crypto_provider,
                                     **no_wrap)

    # 7)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([int(i == 0)]).send(w, **no_wrap)
        for i, w in enumerate(workers)
    ])
    gamma = beta_prime_sh + (j * beta) - (2 * beta * beta_prime_sh)

    # 8)
    delta = x_bit_sh_0 + (j * r_0) - (2 * r_0 * x_bit_sh_0)

    # 9)
    theta = gamma * delta

    # 10)
    a = gamma + delta - (theta * 2) + u

    if len(input_shape):
        return a.view(*list(input_shape))
    else:
        return a
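A plaintext check (no shares) of the bit-extraction identity behind steps 2-9: over an odd ring of size M, the least significant bit of y can be recovered from r = y + x (mod M) for a random mask x as lsb(y) = lsb(r) XOR lsb(x) XOR (x > r); gamma, delta and theta compute exactly this XOR on shares.

import random

M = 2 ** 63 - 1          # an odd modulus, standing in for L - 1
for _ in range(1000):
    y = random.randrange(M)
    x = random.randrange(M)
    r = (y + x) % M
    assert y % 2 == (r % 2) ^ (x % 2) ^ int(x > r)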
Example #16
def private_compare(x_bit_sh, r, beta, L):
    """
    Privately compute x > r

    args:
        x_bit_sh (AdditiveSharingTensor): the bit-decomposed private tensor
        r (MultiPointerTensor): the threshold commonly held by the workers
        beta (MultiPointerTensor): a boolean commonly held by the workers to
            hide the result of the computation from the crypto provider
        L (int): field size for r

    return:
        β′ = β ⊕ (x > r).
    """
    assert isinstance(x_bit_sh, sy.AdditiveSharingTensor)
    assert isinstance(r, sy.MultiPointerTensor)
    assert isinstance(beta, sy.MultiPointerTensor)
    # Would it be safer to have a different r/beta for each value in the tensor?

    workers = x_bit_sh.locations
    crypto_provider = x_bit_sh.crypto_provider
    p = x_bit_sh.field

    # the commented out numbers below correspond to the
    # line numbers in Algorithm 3 of the SecureNN paper
    # https://eprint.iacr.org/2018/442.pdf

    # Common randomness
    s = torch.randint(1, p, x_bit_sh.shape).send(*workers, **no_wrap)
    u = torch.randint(1, p, x_bit_sh.shape).send(*workers, **no_wrap)
    perm = torch.randperm(x_bit_sh.shape[-1]).send(*workers, **no_wrap)

    j = sy.MultiPointerTensor(children=[
        torch.tensor([int(i == 0)]).send(w, **no_wrap)
        for i, w in enumerate(workers)
    ])

    # 1)
    t = r + 1
    t_bit = decompose(t, L)
    r_bit = decompose(r, L)

    # if beta == 0
    # 5)
    w = x_bit_sh + (j * r_bit) - (2 * r_bit * x_bit_sh)
    # 6)
    wc = w.flip(-1).cumsum(-1).flip(-1) - w
    c_beta0 = -x_bit_sh + (j * r_bit) + j + wc

    # elif beta == 1 AND r != 2^l- 1
    # 8)
    w = x_bit_sh + (j * t_bit) - (2 * t_bit * x_bit_sh)
    # 9)
    wc = w.flip(-1).cumsum(-1).flip(-1) - w
    c_beta1 = x_bit_sh + (-j * t_bit) + j + wc

    # else
    # 11)
    c_igt1 = (1 - j) * (u + 1) - (j * u)
    c_ie1 = (1 - 2 * j) * u

    l1_mask = torch.zeros(x_bit_sh.shape).long()
    l1_mask[..., 0] = 1
    l1_mask = l1_mask.send(*workers, **no_wrap)
    # c_else = if i == 1 c_ie1 else c_igt1
    c_else = (l1_mask * c_ie1) + ((1 - l1_mask) * c_igt1)

    # Mask for the case r == 2^l −1
    r_mask = (r == get_r_mask(L)).long()
    r_mask = r_mask.unsqueeze(-1)

    # Mask combination to execute the if / else statements of 4), 7), 10)
    c = (1 - beta) * c_beta0 + (beta *
                                (1 - r_mask)) * c_beta1 + (beta *
                                                           r_mask) * c_else

    # 14)
    # Hide c values
    mask = s * c

    # Permute the mask
    # I have to create idx because Ellipsis are still not supported
    # (I would like to do permuted_mask = mask[..., perm])
    idx = [slice(None)] * (len(x_bit_sh.shape) - 1) + [perm]
    permuted_mask = mask[idx]
    # Send it to another worker
    # We can't allow the local worker to get and see permuted_mask, because it could
    # invert the permutation and divide out s to recover c. Opening permuted_mask must
    # therefore be done by a worker which doesn't have access to the randomness
    remote_mask = permuted_mask.wrap().send(crypto_provider, **no_wrap)

    # 15)
    d_ptr = remote_mask.remote_get()
    beta_prime = (d_ptr == 0).sum(-1)

    # Get result back
    res = beta_prime.get()
    return res
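A plaintext version (no shares, bits ordered most-significant first) of the comparison trick in steps 5-11: x > r iff some position i has c_i = r_i - x_i + 1 + sum_{j < i} (x_j XOR r_j) equal to zero, which is what the protocol evaluates on shares and reveals only as a zero count.

def plain_greater(x, r, n_bits=8):
    x_bits = [(x >> (n_bits - 1 - i)) & 1 for i in range(n_bits)]
    r_bits = [(r >> (n_bits - 1 - i)) & 1 for i in range(n_bits)]
    acc, c = 0, []
    for x_i, r_i in zip(x_bits, r_bits):
        c.append(r_i - x_i + 1 + acc)   # zero exactly at the first differing bit when x > r
        acc += x_i ^ r_i
    return any(c_i == 0 for c_i in c)

assert all(plain_greater(x, r) == (x > r) for x in range(256) for r in range(256))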
Example #17
    def send(
        self,
        *location,
        inplace: bool = False,
        # local_autograd=False,
        # preinitialize_grad=False,
        no_wrap=False,
        garbage_collect_data=True,
    ):
        """Gets the pointer to a new remote object.

        One of the most commonly used methods in PySyft, this method serializes
        the object upon which it is called (self), sends the object to a remote
        worker, creates a pointer to that worker, and then returns that pointer
        from this function.

        Args:
            location: The BaseWorker object which you want to send this object
                to. Note that this is never actually the BaseWorker but instead
                a class which instantiates the BaseWorker abstraction.
            inplace: if true,
              return the same object instance, else a new wrapper
            # local_autograd: Use autograd system on the local machine instead
              of TensorFlow's autograd on the workers.
            # preinitialize_grad: Initialize gradient for AutogradTensors
              to a tensor
            no_wrap: If True, wrap() is called on the created pointer
            garbage_collect_data: argument passed down to create_pointer()

        Returns:
            A tf.EagerTensor[PointerTensor] pointer to self. Note that this
            object will likely be wrapped by a tf.EagerTensor wrapper.
        """

        # If you send a pointer p1, you want the pointer to pointer p2
        # to control the garbage collection and not the remaining old
        # p1 (here self). Because if p2 is not GCed, GCing p1 shouldn't delete
        # the remote tensor, but if you want to do so, as p2 is not GCed,
        # you can still do `del p2`. This allows to chain multiple
        # .send().send() calls.

        if len(location) == 1:

            location = location[0]

            if hasattr(self, "child") and isinstance(self.child,
                                                     PointerTensor):
                self.child.garbage_collect_data = False

            ptr = self.owner.send(self,
                                  location,
                                  garbage_collect_data=garbage_collect_data)

            ptr.description = self.description
            ptr.tags = self.tags

            # The last pointer should control remote GC,
            # not the previous self.ptr
            if hasattr(self, "ptr") and self.ptr is not None:
                ptr_ = self.ptr()
                if ptr_ is not None:
                    ptr_.garbage_collect_data = False

            # we need to cache this weak reference to the pointer so that
            # if this method gets called multiple times we can simply re-use
            # the same pointer which was previously created
            self.ptr = weakref.ref(ptr)

            if inplace:
                self.set_()  # TODO[jason]: pretty sure this is torch specific
                self.child = ptr
                return self
            else:
                output = (ptr if no_wrap else ptr.wrap(type=tf.Variable,
                                                       initial_value=[]))

        else:

            children = list()
            for loc in location:
                children.append(self.send(loc, no_wrap=True))

            output = syft.MultiPointerTensor(children=children)

            if not no_wrap:
                output = output.wrap(type=tf.Variable, initial_value=[])

        return output
Example #18
    def send(
        self,
        *location,
        inplace: bool = False,
        user: object = None,
        local_autograd: bool = False,
        requires_grad: bool = False,
        preinitialize_grad: bool = False,
        no_wrap: bool = False,
        garbage_collect_data: bool = True,
    ):
        """Gets the pointer to a new remote object.

        One of the most commonly used methods in PySyft, this method serializes the object upon
        which it is called (self), sends the object to a remote worker, creates a pointer to
        that worker, and then returns that pointer from this function.

        Args:
            location: The BaseWorker object which you want to send this object to. Note that
                this is never actually the BaseWorker but instead a class which instantiates the
                BaseWorker abstraction.
            inplace: if true, return the same object instance, else a new wrapper
            user (object,optional): User credentials to be verified.
            local_autograd: Use autograd system on the local machine instead of PyTorch's
                autograd on the workers.
            requires_grad: Default to False. If true, whenever the remote value of this tensor
                will have its gradient updated (for example when calling .backward()), a call
                will be made to set back the local gradient value.
            preinitialize_grad: Initialize gradient for AutogradTensors to a tensor
            no_wrap: If True, wrap() is called on the created pointer
            garbage_collect_data: argument passed down to create_pointer()

        Returns:
            A torch.Tensor[PointerTensor] pointer to self. Note that this
            object will likely be wrapped by a torch.Tensor wrapper.

        Raises:
                SendNotPermittedError: Raised if send is not permitted on this tensor.
        """

        # If you send a pointer p1, you want the pointer to pointer p2 to control
        # the garbage collection and not the remaining old p1 (here self). Because if
        # p2 is not GCed, GCing p1 shouldn't delete the remote tensor, but if you
        # want to do so, as p2 is not GCed, you can still do `del p2`.
        # This allows to chain multiple .send().send() calls.

        if len(location) == 1:

            location = location[0]

            if self.has_child() and isinstance(self.child, PointerTensor):
                self.child.garbage_collect_data = False
                if self._is_parameter():
                    self.data.child.garbage_collect_data = False

            ptr = self.owner.send(
                self,
                location,
                local_autograd=local_autograd,
                requires_grad=requires_grad,
                preinitialize_grad=preinitialize_grad,
                garbage_collect_data=garbage_collect_data,
            )

            ptr.description = self.description
            ptr.tags = self.tags

            # The last pointer should control remote GC, not the previous self.ptr
            if hasattr(self, "ptr") and self.ptr is not None:
                ptr_ = self.ptr()
                if ptr_ is not None:
                    ptr_.garbage_collect_data = False

            # we need to cache this weak reference to the pointer so that
            # if this method gets called multiple times we can simply re-use
            # the same pointer which was previously created
            self.ptr = weakref.ref(ptr)

            if self._is_parameter():
                if inplace:
                    self.is_wrapper = True
                    with torch.no_grad():
                        self.set_()
                    self.data = ptr
                    output = self
                else:
                    if no_wrap:
                        raise ValueError(
                            "Parameters can't accept no_wrap=True")
                    wrapper = torch.Tensor()
                    param_wrapper = torch.nn.Parameter(wrapper)
                    param_wrapper.is_wrapper = True
                    with torch.no_grad():
                        param_wrapper.set_()
                    param_wrapper.data = ptr
                    output = param_wrapper
            else:
                if inplace:
                    self.is_wrapper = True
                    self.set_()
                    self.child = ptr
                    return self
                else:
                    output = ptr if no_wrap else ptr.wrap()

            if self.requires_grad:
                # This is for AutogradTensor to work on MultiPointerTensors
                # With pre-initialized gradients, this should get it from AutogradTensor.grad
                if preinitialize_grad:
                    grad = output.child.grad
                else:
                    grad = output.attr("grad")

                output.grad = grad

                # Because of the way PyTorch works, .grad is prone to
                # create entirely new Python objects for the tensor, which
                # inadvertently deletes our custom attributes (like .child)
                # But, if we keep a backup reference around, PyTorch seems
                # to re-use it, which means .grad keeps the attributes we
                # want it to keep. #HackAlert
                output.backup_grad = grad

            if local_autograd:
                output = syft.AutogradTensor(
                    data=output,
                    preinitialize_grad=preinitialize_grad).on(output)

        else:

            children = []
            for loc in location:
                children.append(self.clone().send(loc, no_wrap=True))

            output = syft.MultiPointerTensor(children=children)

            if not no_wrap:
                output = output.wrap()

        return output
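A hedged usage sketch of the multi-location branch (PySyft 0.2.x style; the worker names are illustrative):

import torch
import syft as sy

hook = sy.TorchHook(torch)
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")

x = torch.tensor([1, 2, 3])
single = x.send(alice)        # wrapper around a single PointerTensor on alice
multi = x.send(alice, bob)    # wrapper around a MultiPointerTensor, one child per worker
assert isinstance(multi.child, sy.MultiPointerTensor)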
Example #19
def share_convert(a_sh):
    """
    Convert shares of a in field L to shares of a in field L - 1

    Args:
        a_sh (AdditiveSharingTensor): the additive sharing tensor that holds
            the shares in field L to convert

    Return:
        An additive sharing tensor with shares in field L-1
    """
    assert isinstance(a_sh, sy.AdditiveSharingTensor)

    workers = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field

    # Common randomness
    eta_pp = _random_common_bit(*workers)
    r = _random_common_value(L, *workers)

    # Share remotely r
    r_sh = ((r * 1).child[workers[0].id].share(
        *workers, field=L, crypto_provider=crypto_provider).get().child)
    r_shares = r_sh.child

    alpha0 = ((r_shares[workers[0].id] +
               r_shares[workers[1].id].copy().move(workers[0])) >= L).long()
    alpha1 = alpha0.copy().move(workers[1])
    alpha = sy.MultiPointerTensor(children=[alpha0, alpha1])

    u_sh = _shares_of_zero(1, L - 1, crypto_provider, *workers)

    # 2)
    a_tilde_sh = a_sh + r_sh
    a_shares = a_sh.child
    beta0 = ((a_shares[workers[0].id] + r_shares[workers[0].id]) >= L).long()
    beta1 = ((a_shares[workers[1].id] + r_shares[workers[1].id]) >= L).long()
    beta = sy.MultiPointerTensor(children=[beta0, beta1])

    # 4)
    a_tilde_shares = a_tilde_sh.child
    delta = ((a_tilde_shares[workers[0].id].copy().get() +
              a_tilde_shares[workers[1].id].copy().get()) >= L).long()
    x = a_tilde_sh.get() % L

    # 5)
    x_bit = decompose(x)
    x_bit_sh = x_bit.share(*workers,
                           field=p,
                           crypto_provider=crypto_provider,
                           **no_wrap)
    delta_sh = delta.share(*workers,
                           field=L - 1,
                           crypto_provider=crypto_provider,
                           **no_wrap)

    # 6)
    eta_p = private_compare(x_bit_sh, r - 1, eta_pp)
    # 7)
    eta_p_sh = eta_p.share(*workers,
                           field=L - 1,
                           crypto_provider=crypto_provider,
                           **no_wrap)

    # 9)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([0]).send(workers[0], **no_wrap),
        torch.tensor([1]).send(workers[1], **no_wrap),
    ])
    eta_sh = eta_p_sh + (1 - j) * eta_pp - 2 * eta_pp * eta_p_sh

    # 10)
    theta_sh = beta - (1 - j) * (alpha + 1) + delta_sh + eta_sh

    # 11)
    y_sh = -theta_sh + a_sh + u_sh
    return y_sh
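A plaintext check of the wrap-bit accounting that alpha, beta and delta perform (plain integers standing in for the shares): two additive shares of a value in field L either sum to the value directly or wrap around L exactly once.

import random

L = 2 ** 16
for _ in range(1000):
    a = random.randrange(L)
    a0 = random.randrange(L)
    a1 = (a - a0) % L
    wrap = int(a0 + a1 >= L)          # the bit the protocol reconstructs obliviously
    assert a0 + a1 - wrap * L == a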
Example #20
def share_convert_long(a_sh):
    """
    Convert shares of a in field L to shares of a in field L - 1

    Args:
        a_sh (AdditiveSharingTensor): the additive sharing tensor that holds
            the shares in field L to convert

    Return:
        An additive sharing tensor with shares in field L-1
    """
    assert isinstance(a_sh, sy.AdditiveSharingTensor)
    assert (
        a_sh.dtype != "custom"
    ), "`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"

    workers = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field
    torch_dtype = get_torch_dtype(L)
    dtype = get_dtype(L)
    # Common randomness
    eta_pp = _random_common_bit(*workers, dtype=torch_dtype)
    r = _random_common_value(L, *workers)

    # Share remotely r
    r_sh = ((r * 1).child[workers[0].id].share(
        *workers, field=L, dtype=dtype,
        crypto_provider=crypto_provider).get().child)
    r_shares = r_sh.child

    # WORKS WITH N PARTIES, NEEDS BUGFIXING IN WRAP
    # alpha0 = wrap(r_sh, L)
    # alphas = [alpha0.copy().move(w) for w in workers[1:]]
    # alpha = sy.MultiPointerTensor(children=[alpha0, *alphas])

    # WORKS WITH 2 PARTIES
    alpha0 = (((r_shares[workers[0].id] +
                r_shares[workers[1].id].copy().move(workers[0])) >
               get_max_val_field(L))).type(torch_dtype)
    alpha1 = alpha0.copy().move(workers[1])
    alpha = sy.MultiPointerTensor(children=[alpha0, alpha1])

    u_sh = _shares_of_zero(1, L, "custom", crypto_provider, *workers)

    # 2)
    a_tilde_sh = a_sh + r_sh
    a_shares = a_sh.child
    beta0 = (((a_shares[workers[0].id] + r_shares[workers[0].id]) >
              get_max_val_field(L)) +
             ((a_shares[workers[0].id] + r_shares[workers[0].id]) <
              get_min_val_field(L))).type(torch_dtype)
    beta1 = (((a_shares[workers[1].id] + r_shares[workers[1].id]) >
              get_max_val_field(L)) +
             ((a_shares[workers[1].id] + r_shares[workers[1].id]) <
              get_min_val_field(L))).type(torch_dtype)

    beta = sy.MultiPointerTensor(children=[beta0, beta1])

    # 4)
    a_tilde_shares = a_tilde_sh.child
    delta = a_tilde_shares[workers[0].id].copy().get() + a_tilde_shares[
        workers[1].id].copy().get()
    # Check for both positive and negative overflows
    delta = ((delta > get_max_val_field(L)) +
             (delta < get_min_val_field(L))).type(torch_dtype)
    x = a_tilde_sh.get()

    # 5)
    x_bit = decompose(x, L)
    x_bit_sh = x_bit.share(*workers,
                           field=p,
                           dtype="custom",
                           crypto_provider=crypto_provider,
                           **no_wrap)
    delta_sh = delta.share(*workers,
                           field=L - 1,
                           dtype="custom",
                           crypto_provider=crypto_provider,
                           **no_wrap)

    # 6)
    eta_p = private_compare(x_bit_sh, r - 1, eta_pp, L)
    # 7)
    eta_p_sh = eta_p.share(*workers,
                           field=L - 1,
                           dtype="custom",
                           crypto_provider=crypto_provider,
                           **no_wrap)

    # 9)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([int(i != 0)]).send(w, **no_wrap)
        for i, w in enumerate(workers)
    ])
    eta_sh = eta_p_sh + (1 - j) * eta_pp - 2 * eta_pp * eta_p_sh
    # 10)
    theta_sh = beta - (1 - j) * (alpha + 1) + delta_sh + eta_sh
    # 11)
    # NOTE:
    # It seems a simple operation on shares in the L - 1 field is enough to convert a_sh from L to L - 1
    # Conversion of shares is handled internally in AST ops for the custom dtype
    y_sh = u_sh + a_sh + theta_sh
    return y_sh
Example #21
    def send(self,
             *location,
             inplace: bool = False,
             no_wrap=False,
             garbage_collect_data=True):
        """Gets the pointer to a new remote object.

        One of the most commonly used methods in PySyft, this method serializes
        the object upon which it is called (self), sends the object to a remote
        worker, creates a pointer to that worker, and then returns that pointer
        from this function.

        Args:
            location: The BaseWorker object which you want to send this object
                to. Note that this is never actually the BaseWorker but instead
                a class which instantiates the BaseWorker abstraction.
            inplace: if true,
              return the same object instance, else a new wrapper
            no_wrap: If True, wrap() is called on the created pointer
            garbage_collect_data: argument passed down to create_pointer()

        Returns:
            A tf.keras.layers.Layer[ObjectPointer] pointer to self. Note that this
            object will likely be wrapped by a tf.keras.layers.Layer wrapper.
        """

        # If you send a pointer p1, you want the pointer to pointer p2
        # to control the garbage collection and not the remaining old
        # p1 (here self). Because if p2 is not GCed, GCing p1 shouldn't delete
        # the remote keras object, but if you want to do so, as p2 is not GCed,
        # you can still do `del p2`. This allows to chain multiple
        # .send().send() calls.

        if len(location) == 1:

            location = location[0]

            ptr = self.owner.send(self,
                                  location,
                                  garbage_collect_data=garbage_collect_data)

            ptr.description = self.description
            ptr.tags = self.tags

            # The last pointer should control remote GC,
            # not the previous self.ptr
            if hasattr(self, "ptr") and self.ptr is not None:
                ptr_ = self.ptr()
                if ptr_ is not None:
                    ptr_.garbage_collect_data = False

            # we need to cache this weak reference to the pointer so that
            # if this method gets called multiple times we can simply re-use
            # the same pointer which was previously created
            self.ptr = weakref.ref(ptr)

            if inplace:
                self.child = ptr
                return self
            else:
                output = ptr if no_wrap else ptr.wrap(
                    type=tf.keras.models.Model)

        else:

            # TODO [Yann] check if we would want to send the keras
            # object to several workers this way.
            children = list()
            for loc in location:
                children.append(self.send(loc, no_wrap=True))

            output = syft.MultiPointerTensor(children=children)

            if not no_wrap:
                output = output.wrap(type=tf.keras.models.Model)

        return output
Example #22
def msb(a_sh):
    """
    Compute the most significant bit in a_sh; this is an implementation of the
    SecureNN paper https://eprint.iacr.org/2018/442.pdf

    Args:
        a_sh (AdditiveSharingTensor): the tensor of study
    Return:
        the most significant bit
    """

    alice, bob = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field + 1  # field of a is L - 1

    input_shape = a_sh.shape
    a_sh = a_sh.view(-1)

    # the commented out numbers below correspond to the
    # line numbers in Table 5 of the SecureNN paper
    # https://eprint.iacr.org/2018/442.pdf

    # Common Randomness
    beta = _random_common_bit(alice, bob)
    u = _shares_of_zero(1, L, crypto_provider, alice, bob)

    # 1)
    x = torch.LongTensor(a_sh.shape).random_(L - 1)
    x_bit = decompose(x)
    x_sh = x.share(bob, alice, field=L - 1,
                   crypto_provider=crypto_provider).child
    x_bit_0 = x_bit[..., 0]
    x_bit_sh_0 = x_bit_0.share(bob,
                               alice,
                               field=L,
                               crypto_provider=crypto_provider).child
    x_bit_sh = x_bit.share(bob,
                           alice,
                           field=p,
                           crypto_provider=crypto_provider).child

    # 2)
    y_sh = a_sh * 2
    r_sh = y_sh + x_sh

    # 3)
    r = r_sh.reconstruct() % (L - 1)  # convert the additive sharing into a MultiPointerTensor
    r_0 = decompose(r)[..., 0]

    # 4)
    beta_prime = private_compare(x_bit_sh, r, beta=beta)

    # 5)
    beta_prime_sh = beta_prime.share(bob,
                                     alice,
                                     field=L,
                                     crypto_provider=crypto_provider).child

    # 7)
    j = sy.MultiPointerTensor(
        children=[torch.tensor([0]).send(alice),
                  torch.tensor([1]).send(bob)])
    gamma = beta_prime_sh + (j * beta) - (2 * beta * beta_prime_sh)

    # 8)
    delta = x_bit_sh_0 + (j * r_0) - (2 * r_0 * x_bit_sh_0)

    # 9)
    theta = gamma * delta

    # 10)
    a = gamma + delta - (theta * 2) + u

    if len(input_shape):
        return a.view(*list(input_shape))
    else:
        return a
Example #23
def msb(a_sh):
    """
    Compute the most significant bit in a_sh; this is an implementation of the
    SecureNN paper https://eprint.iacr.org/2018/442.pdf

    Args:
        a_sh (AdditiveSharingTensor): the tensor of study
    Return:
        the most significant bit
    """

    alice, bob = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field + 1  # field of a is L - 1

    input_shape = a_sh.shape
    a_sh = a_sh.view(-1)

    # the commented out numbers below correspond to the
    # line numbers in Table 5 of the SecureNN paper
    # https://eprint.iacr.org/2018/442.pdf

    # Common Randomness
    BETA = _random_common_bit(alice, bob)
    u = torch.zeros(1).long().share(alice,
                                    bob,
                                    field=L,
                                    crypto_provider=crypto_provider).child

    # 1)
    x = torch.LongTensor(a_sh.shape).random_(L - 1)
    x_bit = decompose(x)
    x_sh = x.share(bob, alice, field=L - 1,
                   crypto_provider=crypto_provider).child
    x_bit_0 = x_bit[
        ...,
        -1]  # Get last value as decompose reverts bits: 1st one is in last position
    x_bit_sh_0 = x_bit_0.share(bob,
                               alice,
                               field=L,
                               crypto_provider=crypto_provider
                               ).child  # least -> greatest from left -> right
    x_bit_sh = x_bit.share(bob,
                           alice,
                           field=p,
                           crypto_provider=crypto_provider).child

    # 2)
    y_sh = 2 * a_sh
    r_sh = y_sh + x_sh

    # 3)
    r = r_sh.reconstruct()  # convert the additive sharing into a MultiPointerTensor
    r_0 = decompose(r)[..., -1]

    # 4)
    BETA_prime = private_compare(x_bit_sh, r, BETA=BETA)

    # 5)
    BETA_prime_sh = BETA_prime.share(bob,
                                     alice,
                                     field=L,
                                     crypto_provider=crypto_provider).child

    # 7)
    j = sy.MultiPointerTensor(
        children=[torch.tensor([0]).send(alice),
                  torch.tensor([1]).send(bob)])
    gamma = BETA_prime_sh + (j * BETA) - (2 * BETA * BETA_prime_sh)

    # 8)
    delta = x_bit_sh_0 + (j * r_0) - (2 * r_0 * x_bit_sh_0)

    # 9)
    theta = gamma * delta

    # 10)
    a = gamma + delta - (2 * theta) + u

    if len(input_shape):
        return a.view(*list(input_shape))
    else:
        return a
Example #24
def share_convert(a_sh):
    """
    Convert shares of a in field L to shares of a in field L - 1

    Args:
        a_sh (AdditiveSharingTensor): the additive sharing tensor that holds
            the shares in field L to convert

    Return:
        An additive sharing tensor with shares in field L-1
    """
    assert isinstance(a_sh, sy.AdditiveSharingTensor)

    workers = a_sh.locations
    crypto_provider = a_sh.crypto_provider
    L = a_sh.field

    # Common randomness
    eta_pp = _random_common_bit(*workers)
    r = _random_common_value(L, *workers)

    # Share remotely r
    r_sh = ((r * 1).child[workers[0].id].share(
        *workers, field=L, crypto_provider=crypto_provider).get().child)
    r_shares = r_sh.child
    alpha = (
        ((r_shares[workers[0].id] +
          (r_shares[workers[1].id] * 1).move(workers[0])).get() >=
         L).long().send(*workers).child
    )  # FIXME security issue: the local worker learns alpha while this should be avoided
    u_sh = (torch.zeros(1).long().send(workers[0]).share(
        *workers, field=L - 1, crypto_provider=crypto_provider).get().child)

    # 2)
    a_tilde_sh = a_sh + r_sh
    a_shares = a_sh.child
    ptr0 = a_shares[workers[0].id] + r_shares[workers[0].id]
    beta0 = (
        (a_shares[workers[0].id] + r_shares[workers[0].id]) >= L).long() - (
            (a_shares[workers[0].id] + r_shares[workers[0].id]) < 0).long()
    ptr1 = a_shares[workers[1].id] + r_shares[workers[1].id]
    beta1 = (
        (a_shares[workers[1].id] + r_shares[workers[1].id]) >= L).long() - (
            (a_shares[workers[1].id] + r_shares[workers[1].id]) < 0).long()
    beta = sy.MultiPointerTensor(children=[beta0.long(), beta1.long()])

    # 4)
    a_tilde_shares = a_tilde_sh.child
    delta = (((a_tilde_shares[workers[0].id] * 1).get() +
              (a_tilde_shares[workers[1].id] * 1).get()) >= L).long()
    x = a_tilde_sh.get()

    # 5)
    x_bit = decompose(x)
    x_bit_sh = x_bit.share(*workers, field=p,
                           crypto_provider=crypto_provider).child
    delta_sh = delta.share(*workers,
                           field=L - 1,
                           crypto_provider=crypto_provider).child

    # 6)
    eta_p = private_compare(x_bit_sh, r, eta_pp)

    # 7)
    eta_p_sh = eta_p.share(*workers,
                           field=L - 1,
                           crypto_provider=crypto_provider).child

    # 9)
    j = sy.MultiPointerTensor(children=[
        torch.tensor([0]).send(workers[0]),
        torch.tensor([1]).send(workers[1])
    ])
    eta_sh = eta_p_sh + (1 - j) * eta_pp - 2 * eta_pp * eta_p_sh

    # 10)
    theta_sh = beta - (1 - j) * (alpha + 1) + delta_sh + eta_sh

    # 11)
    y_sh = a_sh - theta_sh + u_sh
    y_sh.field = L - 1
    return y_sh
Example #25
def private_compare(x, r, BETA):
    """
    Privately compute x > r

    args:
        x (AdditiveSharingTensor): the private tensor
        r (MultiPointerTensor): the threshold commonly held by alice and bob
        BETA (MultiPointerTensor): a boolean commonly held by alice and bob to
            hide the result of the computation from the crypto provider

    return:
        β′ = β ⊕ (x > r).
    """
    assert isinstance(x, sy.AdditiveSharingTensor)
    assert isinstance(r, sy.MultiPointerTensor)
    assert isinstance(BETA, sy.MultiPointerTensor)

    alice, bob = x.locations
    crypto_provider = x.crypto_provider
    p = x.field
    L = 2**Q_BITS  # 2**l

    # the commented out numbers below correspond to the
    # line numbers in Algorithm 3 of the SecureNN paper
    # https://eprint.iacr.org/2018/442.pdf

    # 1)
    t = (r + 1) % L

    # Mask for the case r == 2^l −1
    R_MASK = (r == (L - 1)).long()

    r = decompose(r)
    t = decompose(t)
    # Mask for beta
    BETA = BETA.unsqueeze(1).expand(list(r.shape))
    R_MASK = R_MASK.unsqueeze(1).expand(list(r.shape))

    u = (torch.rand(x.shape) > 0.5).long().send(bob, alice).child
    # Mask for condition i̸=1 in 11)
    l1_mask = torch.zeros(x.shape).long()
    l1_mask[:, -1:] = 1
    l1_mask = l1_mask.send(bob, alice).child

    # if BETA == 0
    # 5)
    j0 = torch.zeros(x.shape).long().send(bob)
    j1 = torch.ones(x.shape).long().send(alice)
    j = sy.MultiPointerTensor(children=[j0, j1])
    w = (j * r) + x - (2 * x * r)

    # 6)
    wf = flip(w, 1)
    wfc = wf.cumsum(1) - wf
    wfcf = flip(wfc, 1)
    c_beta0 = (j * r) - x + j + wfcf

    # elif BETA == 1 AND r != 2^l- 1
    # 8)
    w = x + (j * t) - (2 * t * x)  # FIXME: unused
    # 9)
    c_beta1 = (-j * t) + x + j + wfcf

    # else
    # 11)
    c_igt1 = (1 - j) * (u + 1) - (j * u)
    c_ie1 = (j * -2) + 1
    c_21l = (l1_mask * c_ie1) + ((1 - l1_mask) * c_igt1)

    # Mask combination to execute the if / else statements of 4), 7), 10)
    c = (1 - BETA) * c_beta0 + BETA * c_beta1
    c = (c * (1 - R_MASK)) + (c_21l * R_MASK)

    # 14)
    # Hide c values
    s = torch.randint(1, p, c.shape).send(alice, bob).child
    mask = s * c
    # Permute the mask
    perm = torch.randperm(c.shape[1]).send(alice, bob).child
    permuted_mask = mask[:, perm]
    # Send it to another worker
    remote_mask = permuted_mask.wrap().send(crypto_provider)

    # 15)
    # remotely transform the AdditiveSharingTensor back to a torch tensor
    d_ptr = remote_mask.remote_get()
    beta_prime = (d_ptr == 0).sum(1)
    # Get result back
    res = beta_prime.get()
    return res
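Why steps 14-15 are safe to open at the crypto provider, sketched on plain tensors: multiplying by nonzero masks and permuting preserves exactly the information beta_prime needs, namely the number of zero entries in c.

import torch

c = torch.tensor([3, 0, 5, 0, 7])              # illustrative values
s = torch.randint(1, 67, c.shape)              # nonzero random masks, as in a field of size p
perm = torch.randperm(c.shape[0])
masked = (s * c)[perm]
assert int((masked == 0).sum()) == int((c == 0).sum())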