Example #1
def div_wraps(
    session: Session,
    r_share: ShareTensor,
    theta_r: ShareTensor,
    x_share: ShareTensor,
    z_shares: List[torch.Tensor],
    y: Union[torch.Tensor, int],
) -> ShareTensor:
    """
    From CrypTen
    Privately computes the number of wraparounds for a set of shares.

    To do so, we note that:
        [theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]

    Where:
        [theta_x] is the wraps for a variable x
        [beta_xr] is the differential wraps for variables x and r
        [eta_xr]  is the plaintext wraps for variables x and r

    Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
    can make the assumption that [eta_xr] = 0 with high probability.
    """

    beta_xr = count_wraps([x_share.tensor, r_share.tensor])
    theta_x = ShareTensor(encoder_precision=0)
    theta_x.tensor = beta_xr - theta_r.tensor

    if session.rank == 0:
        theta_z = count_wraps(z_shares)
        theta_x.tensor += theta_z

    x_share.tensor //= y

    return theta_x
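
A note on count_wraps, which the snippet uses but does not define: it counts how many times the exact integer sum of the shares overflows the ring. A minimal plain-Python sketch of the idea, assuming non-negative shares in Z_{2**64} (count_wraps_plain is a hypothetical name, not the library function):

RING_SIZE = 2 ** 64

def count_wraps_plain(shares):
    # exact integer sum, then count how often it passed the ring modulus
    return sum(shares) // RING_SIZE

# two shares of the value 5 whose exact sum wraps the ring exactly once
shares = [RING_SIZE - 3, 8]
assert sum(shares) % RING_SIZE == 5
assert count_wraps_plain(shares) == 1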
Example #2
    def przs_generate_random_share(
        self, shape: Union[tuple, torch.Size], generators: List[torch.Generator]
    ) -> Any:
        """Generate a random share using the two generators that are
        hold by a party.
        """

        from sympc.tensor import ShareTensor

        gen0, gen1 = generators

        current_share = generate_random_element(
            tensor_type=self.tensor_type,
            generator=gen0,
            shape=shape,
        )

        next_share = generate_random_element(
            tensor_type=self.tensor_type,
            generator=gen1,
            shape=shape,
        )

        share = ShareTensor(session=self)
        share.tensor = current_share - next_share

        return share
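
The shares produced this way sum to zero across parties: party i outputs r_i - r_{i+1}, and the terms telescope. A self-contained sketch with three parties and made-up seeds:

import torch

# party i holds the generators for streams i and (i + 1) % n,
# so the sum of all shares telescopes to zero
n = 3
gens = [torch.Generator().manual_seed(seed) for seed in (11, 22, 33)]
streams = [torch.randint(0, 2**32, (2, 2), generator=g) for g in gens]
shares = [streams[i] - streams[(i + 1) % n] for i in range(n)]
assert (sum(shares) == 0).all()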
Example #3
def test_generate_shares() -> None:
    precision = 12
    base = 4

    x_secret = torch.Tensor([5.0])

    # test with default values
    x_share = ShareTensor(data=x_secret)

    shares_from_share_tensor = MPCTensor.generate_shares(x_share, nr_parties=2)
    shares_from_secret = MPCTensor.generate_shares(x_secret,
                                                   nr_parties=2,
                                                   config=Config())

    assert sum(shares_from_share_tensor).tensor == sum(
        shares_from_secret).tensor

    x_share = ShareTensor(data=x_secret,
                          config=Config(encoder_precision=precision,
                                        encoder_base=base))

    shares_from_share_tensor = MPCTensor.generate_shares(x_share, 2)
    shares_from_secret = MPCTensor.generate_shares(
        x_secret,
        2,
        config=Config(encoder_precision=precision, encoder_base=base))

    assert sum(shares_from_share_tensor).tensor == sum(
        shares_from_secret).tensor
Example #4
def _get_triples(
    op_str: str, nr_parties: int, a_shape: Tuple[int], b_shape: Tuple[int]
) -> Tuple[Tuple[ShareTensor, ShareTensor, ShareTensor]]:
    """
    The Trusted Third Party (TTP) or Crypto Provider should provide these triples.
    Currently, the party that orchestrates the communication provides them.
    """

    a_rand = torch.empty(size=a_shape, dtype=torch.long).random_(
        generator=ttp_generator
    )
    a = ShareTensor(data=a_rand, encoder_precision=0)
    a_shares = MPCTensor.generate_shares(a, nr_parties, torch.long)

    b_rand = torch.empty(size=b_shape, dtype=torch.long).random_(
        generator=ttp_generator
    )
    b = ShareTensor(data=b_rand, encoder_precision=0)
    b_shares = MPCTensor.generate_shares(b, nr_parties, torch.long)

    cmd = getattr(operator, op_str)

    c_val = cmd(a_rand, b_rand)
    c = ShareTensor(data=c_val, encoder_precision=0)
    c_shares = MPCTensor.generate_shares(c, nr_parties, torch.long)

    return a_shares, b_shares, c_shares
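
A quick sanity check on the result (a sketch that assumes ttp_generator and the snippet's imports are in scope, and that "mul" is a supported op_str): reconstructing each sharing must satisfy the triple relation c = op(a, b) in the int64 ring.

# reconstruct the triple by summing shares; int64 addition wraps like the ring
a_shares, b_shares, c_shares = _get_triples("mul", 2, (2, 2), (2, 2))
a = sum(share.tensor for share in a_shares)
b = sum(share.tensor for share in b_shares)
c = sum(share.tensor for share in c_shares)
assert (c == a * b).all()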
Example #5
def max_pool2d_backward_helper(
    input_shape: Tuple[int],
    grads_share: ShareTensor,
    kernel_size: Tuple[int, int],
    stride: Tuple[int, int],
    padding: Tuple[int, int],
) -> ShareTensor:
    """Helper function to compute the gradient needed to be passed to the parent node.

    Args:
        input_shape (Tuple[int]): the size of the input tensor when running max_pool2d
        grads_share (ShareTensor): the share for the output gradient specific to this party
        kernel_size (Tuple[int, int]): the kernel size
        stride (Tuple[int, int]): the stride size
        padding (Tuple[int, int]): the padding size

    Returns:
        A ShareTensor specific for the computed gradient

    Raises:
        ValueError: if the input shape (taken into consideration the padding) is smaller than the
            kernel shape
    """
    session = get_session(str(grads_share.session_uuid))

    res_shape = input_shape[:-2]
    res_shape += (input_shape[-2] + 2 * padding[0],
                  input_shape[-1] + 2 * padding[1])

    if res_shape[-2] < kernel_size[0] or res_shape[-1] < kernel_size[1]:
        raise ValueError(
            f"Kernel size ({kernel_size}) has more elements on an axis than "
            f"input shape ({res_shape}) considering padding of {padding}")

    tensor_type = session.tensor_type
    tensor = torch.zeros(res_shape, dtype=tensor_type)

    for i in range((res_shape[-2] - kernel_size[0]) // stride[0] + 1):
        row_idx = i * stride[0]
        for j in range((res_shape[-1] - kernel_size[1]) // stride[1] + 1):
            col_idx = j * stride[1]
            if len(res_shape) == 4:
                tensor[
                    :, :, row_idx:row_idx + kernel_size[0], col_idx:col_idx + kernel_size[1]
                ] += grads_share.tensor[:, :, i, j]
            else:
                tensor[
                    :, row_idx:row_idx + kernel_size[0], col_idx:col_idx + kernel_size[1]
                ] += grads_share.tensor[:, i, j]

    if len(res_shape) == 4:
        tensor = tensor[:, :, padding[0]:input_shape[-2], padding[1]:input_shape[-1]]
    else:
        tensor = tensor[
            :, padding[0]:res_shape[-2] - padding[0], padding[1]:res_shape[-1] - padding[1]
        ]
    res = ShareTensor(config=grads_share.config)
    res.tensor = tensor

    return res
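
The nested loop above is a scatter-add: the gradient at each pooled output position is accumulated over its kernel window in the (padded) input. A plain-torch illustration of just that step, with made-up sizes (2x2 kernel, stride 1, no padding):

import torch

grads = torch.arange(4.0).reshape(1, 1, 2, 2)  # gradient w.r.t. a 2x2 output
tensor = torch.zeros(1, 1, 3, 3)               # gradient w.r.t. the 3x3 input
for i in range(2):
    for j in range(2):
        # every input cell in the (i, j) window receives that output's grad
        tensor[:, :, i:i + 2, j:j + 2] += grads[:, :, i, j]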
Example #6
def test_same_session_id_and_data() -> None:

    session_id = uuid4()
    x_share = ShareTensor(data=5, session_uuid=session_id)
    y_share = ShareTensor(data=6, session_uuid=session_id)

    # Same session id but different tensor data
    assert x_share != y_share
Example #7
def test_send_get(clients, precision, base) -> None:
    x = torch.Tensor([0.122, 1.342, 4.67])
    x_share = ShareTensor(data=x,
                          encoder_precision=precision,
                          encoder_base=base)
    x_ptr = x_share.send(clients[0])

    assert x_share == x_ptr.get()
Example #8
def test_different_tensor() -> None:
    x_share = ShareTensor(data=5)
    session = x_share.session

    y_share = ShareTensor(data=6, session=session)

    # Different values for tensor
    assert x_share != y_share
Example #9
def test_send_get(get_clients, precision, base) -> None:
    x = torch.Tensor([0.122, 1.342, 4.67])
    x_share = ShareTensor(data=x,
                          config=Config(encoder_precision=precision,
                                        encoder_base=base))
    client = get_clients(1)[0]
    x_ptr = x_share.send(client)

    assert x_share == x_ptr.get()
Example #10
        def _request_and_get(share_ptr: ShareTensor) -> ShareTensor:
            """Function used to request and get a share - Duet Setup
            :return: the ShareTensor (local)
            :rtype: ShareTensor
            """

            if not islocal(share_ptr):
                share_ptr.request(name="reconstruct", block=True)
            res = share_ptr.get_copy()
            return res
Example #11
def proto2object(proto: ShareTensor_PB) -> ShareTensor:
    session = protobuf_session_deserializer(proto=proto.session)

    data = protobuf_tensor_deserializer(proto.tensor.tensor)
    share = ShareTensor(data=None, session=session)

    # Manually put the tensor since we do not want to re-encode it
    share.tensor = data.type(session.tensor_type)

    return share
Example #12
def build_triples(x: MPCTensor, y: MPCTensor,
                  op_str: str) -> Tuple[MPCTensor, MPCTensor, MPCTensor]:
    """
    The Trusted Third Party (TTP) or Crypto Provider should provide these triples.
    Currently, the party that orchestrates the communication provides them.
    """
    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    shape_x = x.shape
    shape_y = y.shape

    session = x.session

    a = ShareTensor(session=session)
    a.tensor = torch.empty(size=shape_x,
                           dtype=torch.long).random_(generator=ttp_generator)

    b = ShareTensor(session=session)
    b.tensor = torch.empty(size=shape_y,
                           dtype=torch.long).random_(generator=ttp_generator)

    cmd = getattr(operator, op_str)

    # Manually place the tensor and use the same session such that we do
    # not encode the result
    c = ShareTensor(session=session)
    c.tensor = cmd(a.tensor, b.tensor)

    a_sh = MPCTensor(secret=a, session=session)
    b_sh = MPCTensor(secret=b, session=session)
    c_sh = MPCTensor(secret=c, session=session)

    return a_sh, b_sh, c_sh
Example #13
def test_share_repr() -> None:

    x = torch.Tensor([5.0])
    x_share = ShareTensor(data=x)

    encoded_x = x_share.fp_encoder.encode(x)

    expected = f"[ShareTensor]\n\t| {x_share.fp_encoder}"
    expected = f"{expected}\n\t| Data: {encoded_x}"

    assert expected == x_share.__str__() == x_share.__repr__()
Example #14
def mul_parties(session_uuid_str: str, eps: torch.Tensor, delta: torch.Tensor,
                op_str: str, **kwargs) -> ShareTensor:
    """SPDZ Multiplication.

    Args:
        session_uuid_str (str): UUID to identify the session on each party side.
        eps (torch.Tensor): Epsilon value of the protocol.
        delta (torch.Tensor): Delta value of the protocol.
        op_str (str): Operator string.
        kwargs: Keyword arguments for the operator.

    Returns:
        ShareTensor: Shared result of the multiplication.
    """
    session = get_session(session_uuid_str)

    crypto_store = session.crypto_store
    eps_shape = tuple(eps.shape)
    delta_shape = tuple(delta.shape)

    primitives = crypto_store.get_primitives_from_store(
        f"beaver_{op_str}", eps_shape, delta_shape)

    a_share, b_share, c_share = primitives

    if op_str in ["conv2d", "conv_transpose2d"]:
        op = getattr(torch, op_str)
    else:
        op = getattr(operator, op_str)

    eps_b = op(eps, b_share.tensor, **kwargs)
    delta_a = op(a_share.tensor, delta, **kwargs)

    share_tensor = c_share.tensor + eps_b + delta_a
    if session.rank == 0:
        delta_eps = op(eps, delta, **kwargs)
        share_tensor += delta_eps

    # Convert to our tensor type
    share_tensor = share_tensor.type(session.tensor_type)

    share = ShareTensor(session_uuid=UUID(session_uuid_str),
                        config=session.config)
    share.tensor = share_tensor

    # Ideally this should stay in the MPCTensor
    # Step 1. Do spdz_mul
    # Step 2. Divide by scale
    # This is done here to reduce one round of communication
    if session.nr_parties == 2:
        share.tensor //= share.fp_encoder.scale

    return share
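
The final division by share.fp_encoder.scale is the usual fixed-point rescale: multiplying two encoded values doubles the scale factor. A plaintext sketch with made-up numbers:

# with scale S, enc(x) = x * S, so enc(x) * enc(y) carries a factor of S**2;
# one integer division by S restores the single-scale encoding
S = 2 ** 16
x, y = 1.5, 2.25
prod = int(x * S) * int(y * S)
rescaled = prod // S
assert rescaled / S == x * y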
Example #15
def test_share_print() -> None:

    x = torch.Tensor([5.0])
    x_share = ShareTensor(data=x)

    encoded_x = x_share.fp_encoder.encode(x)

    expected = "[ShareTensor]"
    expected = f"{expected}\n\t| Session UUID: None"
    expected = f"{expected}\n\t| {x_share.fp_encoder}"
    expected = f"{expected}\n\t| Data: {encoded_x}"

    assert expected == x_share.__str__()
Example #16
        def _request_and_get(share_ptr: ShareTensor) -> ShareTensor:
            """Function used to request and get a share - Duet Setup.

            Args:
                share_ptr (ShareTensor): a ShareTensor

            Returns:
                ShareTensor. The ShareTensor in local.

            """
            if not islocal(share_ptr):
                share_ptr.request(block=True)
            res = share_ptr.get_copy()
            return res
Example #17
def mul_parties(
    session: Session,
    eps: torch.Tensor,
    delta: torch.Tensor,
    op_str: str,
) -> ShareTensor:
    """
    [c] = [a * b]
    [eps] = [x] - [a]
    [delta] = [y] - [b]

    Open eps and delta
    [result] = [c] + eps * [b] + delta * [a] + eps * delta

    :return: the ShareTensor for the multiplication
    :rtype: ShareTensor (in our case ShareTensorPointer)
    """

    crypto_store = session.crypto_store
    eps_shape = tuple(eps.shape)
    delta_shape = tuple(delta.shape)

    primitives = crypto_store.get_primitives_from_store(
        f"beaver_{op_str}", eps_shape, delta_shape)

    a_share, b_share, c_share = primitives
    op = getattr(operator, op_str)

    eps_b = op(eps, b_share.tensor)
    delta_a = op(a_share.tensor, delta)

    share_tensor = c_share.tensor + eps_b + delta_a
    if session.rank == 0:
        delta_eps = op(eps, delta)
        share_tensor += delta_eps

    # Convert to our tensor type
    share_tensor = share_tensor.type(session.tensor_type)

    share = ShareTensor(session=session)
    share.tensor = share_tensor

    # Ideally this should stay in the MPCTensor
    # Step 1. Do spdz_mul
    # Step 2. Divide by scale
    # This is done here to reduce one round of communication
    if session.nr_parties == 2:
        share.tensor //= share.fp_encoder.scale

    return share
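
The identity in the docstring is plain algebra: with eps = x - a and delta = y - b, we have x * y = (a + eps) * (b + delta) = a * b + eps * b + delta * a + eps * delta. A plaintext check with made-up values (no MPC involved):

# Beaver-triple identity: c = a * b, eps = x - a, delta = y - b
x, y = 7, 9
a, b = 3, 4
c = a * b
eps, delta = x - a, y - b
assert x * y == c + eps * b + delta * a + eps * delta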
Example #18
    def prrs_generate_random_share(
        self,
        shape: Union[tuple, torch.Size],
    ) -> Any:
        """Generates a random share using the generators held by a party.

        Args:
            shape (Union[tuple, torch.Size]): Shape for the share.

        Returns:
            Any: ShareTensor or ReplicatedSharedTensor

        """
        from sympc.tensor import ReplicatedSharedTensor
        from sympc.tensor import ShareTensor

        share1, share2 = self._generate_random_share(shape)

        if self.protocol.share_class == ShareTensor:
            # It has encoder_precision = 0 such that the value would not be encoded
            share = ShareTensor(
                data=share1,
                session_uuid=self.uuid,
                config=Config(encoder_precision=0),
            )
        else:
            share = ReplicatedSharedTensor(
                shares=[share1, share2],
                session_uuid=self.uuid,
                config=Config(encoder_precision=0),
            )
        return share
Example #19
def test_generate_and_transfer_primitive(
    get_clients: Callable,
    nr_parties: int,
    nr_instances: int,
    nr_instances_retrieve: int,
) -> None:
    parties = get_clients(nr_parties)
    session = Session(parties=parties)
    SessionManager.setup_mpc(session)

    g_kwargs = {"nr_parties": nr_parties, "nr_instances": nr_instances}
    CryptoPrimitiveProvider.generate_primitives(
        "test",
        session=session,
        g_kwargs=g_kwargs,
        p_kwargs={},
    )

    for i in range(nr_parties):
        remote_crypto_store = session.session_ptrs[i].crypto_store
        primitives = remote_crypto_store.get_primitives_from_store(
            op_str="test", nr_instances=nr_instances_retrieve).get()
        assert primitives == [
            tuple(ShareTensor(i) for _ in range(PRIMITIVE_NR_ELEMS))
            for _ in range(nr_instances_retrieve)
        ]
Example #20
    def sanity_checks(
        secret: Union[ShareTensor, torch.Tensor, float, int],
        shape: Optional[Union[torch.Size, List[int], Tuple[int, ...]]],
        session: Session,
    ) -> Tuple[
        Union[ShareTensor, torch.Tensor, float, int],
        Union[torch.Size, List[int], Tuple[int, ...]],
        bool,
    ]:
        """Sanity checks to validate that a new MPCTensor instance can be
        created.

        :return: a tuple of the secret (as a ShareTensor if it was local),
            its shape, and whether the secret is remote
        :rtype: Tuple
        """
        is_remote_secret: bool = False

        if ispointer(secret):
            is_remote_secret = True
            if shape is None:
                raise ValueError(
                    "Shape must be specified if secret is at another party")

        else:
            if isinstance(secret, (int, float)):
                secret = torch.tensor(data=[secret])

            if isinstance(secret, torch.Tensor):
                secret = ShareTensor(data=secret, session=session)

            shape = secret.shape

        return secret, shape, is_remote_secret
Example #21
    def get_grad_input_padding(
        grad_output, input_size, stride, padding, kernel_size, dilation, session
    ):
        """Auxillary function to find grad input padding.

        Args:
            grad_output: grad
            input_size: the input size
            stride: stride
            padding: padding
            kernel_size: the kernel size
            dilation: dilation
            session: session

        Returns:
            (ShareTensorPointer): The result of the conv2d operation
        """
        new_tuple = torch.nn.grad._grad_input_padding(
            grad_output=grad_output.tensor,
            input_size=input_size,
            stride=(stride, stride),
            padding=(padding, padding),
            kernel_size=kernel_size,
            dilation=(dilation, dilation),
        )
        share_tensor = ShareTensor(torch.tensor(new_tuple), config=session.config)
        return share_tensor
Example #22
def test_div_with_float_exception() -> None:

    x = torch.Tensor([[0.125, -1.25], [-4.25, 4]])

    x_share = ShareTensor(data=x, encoder_base=16, encoder_precision=2)

    with pytest.raises(ValueError):
        x_share / 5.3
Example #23
def test_ineq_share_share_local(op_str, precision, base) -> None:
    op = getattr(operator, op_str)

    x = torch.Tensor([[0.125, -1.25], [-4.25, 4]])
    y = torch.Tensor([[4.5, -2.5], [5, 2.25]])

    x_share = ShareTensor(data=x,
                          encoder_base=base,
                          encoder_precision=precision)
    y_share = ShareTensor(data=y,
                          encoder_base=base,
                          encoder_precision=precision)

    expected_res = op(x, y)
    res = op(x_share, y_share)

    assert (res == expected_res).all()
Example #24
def test_ops_share_share_local(op_str, precision, base) -> None:
    op = getattr(operator, op_str)

    x = torch.Tensor([[0.125, -1.25], [-4.25, 4]])
    y = torch.Tensor([[4.5, -2.5], [5, 2.25]])

    x_share = ShareTensor(data=x,
                          encoder_base=base,
                          encoder_precision=precision)
    y_share = ShareTensor(data=y,
                          encoder_base=base,
                          encoder_precision=precision)

    expected_res = op(x, y)
    res = op(x_share, y_share)
    tensor_decoded = res.fp_encoder.decode(res.tensor)

    assert np.allclose(tensor_decoded, expected_res, rtol=base**-precision)
Example #25
def div_wraps(
    r_share: ShareTensor,
    theta_r: ShareTensor,
    x_share: ShareTensor,
    z_shares: List[torch.Tensor],
    y: Union[torch.Tensor, int],
) -> ShareTensor:
    """From CrypTen Privately computes the number of wraparounds for a set a shares.

    To do so, we note that:
        [theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]

    Where:
        [theta_x] is the wraps for a variable x
        [beta_xr] is the differential wraps for variables x and r
        [eta_xr]  is the plaintext wraps for variables x and r

    Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
    can make the assumption that [eta_xr] = 0 with high probability.

    Args:
        r_share (ShareTensor): share for a random variable "r"
        theta_r (ShareTensor): share for the number of wraparounds for "r"
        x_share (ShareTensor): shares for which we want to compute the number of wraparounds
        z_shares (List[torch.Tensor]): list of shares for a random value
        y (Union[torch.Tensor, int]): the number/tensor by which we divide

    Returns:
        ShareTensor representing the number of wraparounds
    """
    session = get_session(str(r_share.session_uuid))

    beta_xr = count_wraps([x_share.tensor, r_share.tensor])
    theta_x = ShareTensor(config=Config(encoder_precision=0))
    theta_x.tensor = beta_xr - theta_r.tensor

    if session.rank == 0:
        theta_z = count_wraps(z_shares)
        theta_x.tensor += theta_z

    x_share.tensor //= y

    return theta_x
Example #26
def test_generate_shares_config(get_clients) -> None:
    x_secret = torch.Tensor([5.0])
    x_share = ShareTensor(data=x_secret)

    shares_from_share_tensor = MPCTensor.generate_shares(x_share, 2)
    shares_from_secret = MPCTensor.generate_shares(
        x_secret, 2, config=Config(encoder_base=2, encoder_precision=16)
    )

    assert sum(shares_from_share_tensor) == sum(shares_from_secret)
Example #27
def mul_parties(
    session: Session,
    a_share: ShareTensor,
    b_share: ShareTensor,
    c_share: ShareTensor,
    eps: torch.Tensor,
    delta: torch.Tensor,
    op_str: str,
) -> ShareTensor:
    """
    [c] = [a * b]
    [eps] = [x] - [a]
    [delta] = [y] - [b]

    Open eps and delta
    [result] = [c] + eps * [b] + delta * [a] + eps * delta

    :return: the ShareTensor for the multiplication
    :rtype: ShareTensor (in our case ShareTensorPointer)
    """

    op = getattr(operator, op_str)

    eps_b = op(eps, b_share.tensor)
    delta_a = op(a_share.tensor, delta)

    share_tensor = c_share.tensor + eps_b + delta_a
    if session.rank == 0:
        delta_eps = op(eps, delta)
        share_tensor += delta_eps

    scale = session.config.encoder_base ** session.config.encoder_precision

    share_tensor //= scale

    # Convert to our tensor type
    share_tensor = share_tensor.type(session.tensor_type)

    share = ShareTensor(session=session)
    share.tensor = share_tensor

    return share
Example #28
    def generate_shares(secret, session: Session) -> List[ShareTensor]:
        """Given a secret generate, split it into a number of shares such that
        each party would get one

        :return: list of shares
        :rtype: List of Zero Shares
        """
        if not isinstance(secret, ShareTensor):
            raise ValueError("Secret should be a ShareTensor")

        parties: List[Any] = session.parties
        nr_parties = len(parties)

        min_value = session.min_value
        max_value = session.max_value

        shape = secret.shape
        tensor_type = session.tensor_type

        random_shares = []
        generator = csprng.create_random_device_generator()

        for _ in range(nr_parties - 1):
            rand_value = torch.empty(
                size=shape, dtype=torch.long).random_(generator=generator)
            share = ShareTensor(session=session)

            # Add the share after such that we do not encode it
            share.tensor = rand_value
            random_shares.append(share)

        shares = []
        for i in range(len(parties)):
            if i == 0:
                share = random_shares[i]
            elif i < nr_parties - 1:
                share = random_shares[i] - random_shares[i - 1]
            else:
                share = secret - random_shares[i - 1]

            shares.append(share)
        return shares
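
The loop above builds a telescoping sum: r_0 + (r_1 - r_0) + ... + (secret - r_{n-2}) collapses back to the secret. A plain-int sketch for three parties with hypothetical values:

import random

secret = 42
r = [random.randrange(2 ** 32) for _ in range(2)]  # nr_parties - 1 randoms
shares = [r[0], r[1] - r[0], secret - r[1]]        # telescoping shares
assert sum(shares) == secret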
Example #29
def test_invalid_op_exception() -> None:

    op = getattr(operator, "truediv")

    x = torch.Tensor([[0.125, -1.25], [-4.25, 4]])
    y = torch.Tensor([[4.5, -2.5], [5, 2.25]])

    x_share = ShareTensor(data=x, encoder_base=16, encoder_precision=2)

    with pytest.raises(TypeError):
        op(y, x_share)
Example #30
def provider_test(nr_parties: int, nr_instances: int) -> List[Tuple[int]]:
    """This function will generate the values:

    [((0, 0, 0, 0), (0, 0, 0, 0), ...), ((1, 1, 1, 1), (1, 1, 1, 1)),
    ...]
    """
    primitives = [
        tuple(
            tuple(ShareTensor(data=i) for _ in range(PRIMITIVE_NR_ELEMS))
            for _ in range(nr_instances)) for i in range(nr_parties)
    ]
    return primitives