def test_grad_div_backward(get_clients) -> None:
    parties = get_clients(2)

    session = Session(parties=parties)
    session.autograd_active = True
    SessionManager.setup_mpc(session)

    x_secret = torch.tensor([1.0, 2.1, 3.0, -4.13], requires_grad=True)
    x = MPCTensor(secret=x_secret, session=session, requires_grad=True)

    y_secret = torch.tensor([-2.0, 3.0, 4.39, 5.0], requires_grad=True)
    y = MPCTensor(secret=y_secret, session=session, requires_grad=True)

    z = x_secret / y_secret
    z.backward(torch.tensor([1.0, 1.0, 1.0, 1.0]))

    grad = torch.tensor([1.0, 1.0, 1.0, 1.0])
    grad_mpc = MPCTensor(secret=grad, session=session, requires_grad=True)

    ctx = {"x": x, "y": y, "result": x / y}

    grad_x, grad_y = GradDiv.backward(ctx, grad_mpc)

    expected_grad_x = x_secret.grad
    expected_grad_y = y_secret.grad

    res_x = grad_x.reconstruct()
    res_y = grad_y.reconstruct()

    assert np.allclose(res_x, expected_grad_x, rtol=1e-2)
    assert np.allclose(res_y, expected_grad_y, rtol=1e-2)
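For reference, the gradients that GradDiv.backward must reproduce follow directly from calculus: for z = x / y, dz/dx = 1 / y and dz/dy = -x / y**2. A minimal plain-PyTorch sketch of that check (no SyMPC involved):

import torch

x = torch.tensor([1.0, 2.1, 3.0, -4.13], requires_grad=True)
y = torch.tensor([-2.0, 3.0, 4.39, 5.0], requires_grad=True)

z = x / y
z.backward(torch.ones_like(z))

assert torch.allclose(x.grad, 1 / y.detach())
assert torch.allclose(y.grad, -x.detach() / y.detach() ** 2)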
Example #2
def build_triples(x: MPCTensor, y: MPCTensor,
                  op_str: str) -> Tuple[MPCTensor, MPCTensor, MPCTensor]:
    """
    The Trusted Third Party (TTP) or Crypto Provider should provide this triples
    Currently, the one that orchestrates the communication provides those
    """
    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    shape_x = x.shape
    shape_y = y.shape

    session = x.session

    a = ShareTensor(session=session)
    a.tensor = torch.empty(size=shape_x,
                           dtype=torch.long).random_(generator=ttp_generator)

    b = ShareTensor(session=session)
    b.tensor = torch.empty(size=shape_y,
                           dtype=torch.long).random_(generator=ttp_generator)

    cmd = getattr(operator, op_str)

    # Manually place the tensor and use the same session such that we do
    # not encode the result
    c = ShareTensor(session=session)
    c.tensor = cmd(a.tensor, b.tensor)

    a_sh = MPCTensor(secret=a, session=session)
    b_sh = MPCTensor(secret=b, session=session)
    c_sh = MPCTensor(secret=c, session=session)

    return a_sh, b_sh, c_sh
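The algebra these triples enable can be checked in plaintext: once eps = x - a and delta = y - b are revealed, x * y equals c + eps * b + delta * a + eps * delta. A small sketch for the elementwise "mul" case (plain PyTorch, not part of the library):

import torch

x, y = torch.tensor([3, -5]), torch.tensor([7, 2])
a, b = torch.randint(-100, 100, (2,)), torch.randint(-100, 100, (2,))
c = a * b

eps, delta = x - a, y - b
assert torch.equal(c + eps * b + delta * a + eps * delta, x * y)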
Example #3
    def share_state_dict(
        self,
        state_dict: Dict[str, Any],
    ) -> None:
        """Share the parameters of the normal Linear layer.

        Args:
            state_dict (Dict[str, Any]): the state dict that would be shared
        """

        bias = None
        if ispointer(state_dict):
            weight = state_dict["weight"].resolve_pointer_type()
            if "bias" in weight.client.python.List(state_dict).get():
                bias = state_dict["bias"].resolve_pointer_type()
            shape = weight.client.python.Tuple(weight.shape)
            shape = shape.get()
        else:
            weight = state_dict["weight"]
            bias = state_dict.get("bias")
            shape = state_dict["weight"].shape

        self.out_features, self.in_features = shape
        self.weight = MPCTensor(secret=weight,
                                session=self.session,
                                shape=shape)

        if bias is not None:
            self.bias = MPCTensor(secret=bias,
                                  session=self.session,
                                  shape=(self.out_features, ))
Example #4
def _get_triples(
    op_str: str, nr_parties: int, a_shape: Tuple[int], b_shape: Tuple[int]
) -> Tuple[Tuple[ShareTensor, ShareTensor, ShareTensor]]:
    """
    The Trusted Third Party (TTP) or Crypto Provider should provide this triples
    Currently, the one that orchestrates the communication provides those triples.
    """

    a_rand = torch.empty(size=a_shape, dtype=torch.long).random_(
        generator=ttp_generator
    )
    a = ShareTensor(data=a_rand, encoder_precision=0)
    a_shares = MPCTensor.generate_shares(a, nr_parties, torch.long)

    b_rand = torch.empty(size=b_shape, dtype=torch.long).random_(
        generator=ttp_generator
    )
    b = ShareTensor(data=b_rand, encoder_precision=0)
    b_shares = MPCTensor.generate_shares(b, nr_parties, torch.long)

    cmd = getattr(operator, op_str)

    c_val = cmd(a_rand, b_rand)
    c = ShareTensor(data=c_val, encoder_precision=0)
    c_shares = MPCTensor.generate_shares(c, nr_parties, torch.long)

    return a_shares, b_shares, c_shares
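The generate_shares calls above split each triple element additively: n - 1 shares are drawn at random and the last is chosen so the sum reconstructs the value. A minimal sketch of that scheme (assumed semantics, plain PyTorch):

import torch

secret = torch.tensor([42, -7], dtype=torch.long)
nr_parties = 3

shares = [torch.randint(-1000, 1000, secret.shape) for _ in range(nr_parties - 1)]
shares.append(secret - sum(shares))

assert torch.equal(sum(shares), secret)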
Example #5
def test_generate_shares() -> None:
    precision = 12
    base = 4

    x_secret = torch.Tensor([5.0])

    # test with default values
    x_share = ShareTensor(data=x_secret)

    shares_from_share_tensor = MPCTensor.generate_shares(x_share, nr_parties=2)
    shares_from_secret = MPCTensor.generate_shares(x_secret,
                                                   nr_parties=2,
                                                   config=Config())

    assert sum(shares_from_share_tensor).tensor == sum(
        shares_from_secret).tensor

    x_share = ShareTensor(data=x_secret,
                          config=Config(encoder_precision=precision,
                                        encoder_base=base))

    shares_from_share_tensor = MPCTensor.generate_shares(x_share, 2)
    shares_from_secret = MPCTensor.generate_shares(
        x_secret,
        2,
        config=Config(encoder_precision=precision, encoder_base=base))

    assert sum(shares_from_share_tensor).tensor == sum(
        shares_from_secret).tensor
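The encoder settings exercised above follow the usual fixed-point scheme: a float secret is stored as round(value * base ** precision). A tiny sketch of that encoding (assuming this is the scale the encoder uses):

base, precision = 4, 12
scale = base ** precision

encoded = round(5.0 * scale)      # what the underlying ShareTensor stores
assert encoded / scale == 5.0     # decoding recovers the secret exactly here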
Example #6
def test_ops_prime_public_xor(get_clients, security, bit) -> None:
    parties = get_clients(3)
    protocol = Falcon(security)
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)
    ring_size = PRIME_NUMBER

    sh1 = torch.tensor([[17, 44], [8, 20]], dtype=torch.uint8)
    sh2 = torch.tensor([[8, 51], [27, 52]], dtype=torch.uint8)
    sh3 = torch.tensor([[42, 40], [32, 63]], dtype=torch.uint8)
    shares = [sh1, sh2, sh3]
    rst_list = ReplicatedSharedTensor.distribute_shares(shares=shares,
                                                        session=session,
                                                        ring_size=ring_size)
    tensor = MPCTensor(shares=rst_list, session=session)
    tensor.shape = sh1.shape

    secret = ReplicatedSharedTensor.shares_sum(shares, ring_size)

    value = torch.tensor([bit], dtype=torch.uint8)

    result = operator.xor(tensor, value)
    expected_res = secret ^ value

    assert (result.reconstruct(decode=False) == expected_res).all()
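Note that the shares above appear to reconstruct to bit values modulo PRIME_NUMBER, which is what makes XOR with a public bit computable arithmetically: for x, b in {0, 1}, x ^ b == x + b - 2 * x * b. A plaintext check of that identity:

for x in (0, 1):
    for b in (0, 1):
        assert x ^ b == x + b - 2 * x * b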
Example #7
def test_mpc_sort(get_clients, ascending):
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    x = MPCTensor(secret=torch.tensor([1]), session=session)
    y = MPCTensor(secret=torch.tensor([3]), session=session)
    z = MPCTensor(secret=torch.tensor([6]), session=session)
    w = MPCTensor(secret=torch.tensor([18]), session=session)
    v = MPCTensor(secret=torch.tensor([5]), session=session)

    mpctensor_list = [x, y, z, w, v]

    sorted_mpc = sort(mpctensor_list, ascending=ascending)

    expected_list = [
        torch.tensor([1.0]),
        torch.tensor([3.0]),
        torch.tensor([5.0]),
        torch.tensor([6.0]),
        torch.tensor([18.0]),
    ]

    reconstructed = [tensor.reconstruct() for tensor in sorted_mpc]

    if ascending:
        assert reconstructed == expected_list
    else:
        assert reconstructed == expected_list[::-1]
Example #8
def log_softmax(tensor: MPCTensor, dim: Optional[int] = None) -> MPCTensor:
    """Applies a softmax followed by a logarithm.

    While mathematically equivalent to log(softmax(x)), doing these two
    operations separately is slower and numerically unstable. This function
    uses an alternative formulation to compute the output and gradient correctly.

    Args:
        tensor (MPCTensor): whose log-softmax has to be calculated
        dim (int): dim along which log-softmax is to be calculated

    Returns:
        MPCTensor: calculated MPCTensor
    """
    if dim is None:
        dim = len(tensor.shape) - 1

    # Single Element along dim
    if tensor.shape[dim] == 1:
        przs = MPCTensor.generate_przs(shape=tensor.shape,
                                       session=tensor.session)
        zeros = MPCTensor(tensor.session, shape=tensor.shape, shares=przs)
        return zeros  # Equivalent to torch.zeros_like(tensor)

    maximum_value = tensor.max(dim, keepdim=True)[0]
    logits = tensor - maximum_value

    normalize_term = exp(logits).sum(dim, keepdim=True)
    result = logits - log(normalize_term)
    return result
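The formulation above can be sanity-checked against plain PyTorch: after subtracting the max, logits - log(sum(exp(logits))) matches torch.log_softmax. A quick sketch:

import torch

t = torch.randn(3, 5)
logits = t - t.max(dim=1, keepdim=True)[0]
res = logits - logits.exp().sum(dim=1, keepdim=True).log()

assert torch.allclose(res, torch.log_softmax(t, dim=1), atol=1e-6)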
Example #9
def softmax(tensor: MPCTensor, dim: Optional[int] = None) -> MPCTensor:
    """Calculates tanh of given tensor's elements along the given dimension.

    Args:
        tensor (MPCTensor): whose softmax has to be calculated
        dim (int): dim along which softmax is to be calculated

    Returns:
        MPCTensor: calculated MPCTensor
    """
    if dim is None:
        dim = len(tensor.shape) - 1

    # Single Element along dim
    if tensor.shape[dim] == 1:
        przs = MPCTensor.generate_przs(shape=tensor.shape,
                                       session=tensor.session)
        zeros = MPCTensor(tensor.session, shape=tensor.shape, shares=przs)
        return zeros + 1  # Equivalent to torch.ones_like(tensor)

    maximum_value = tensor.max(dim, keepdim=True)[0]
    logits = tensor - maximum_value
    numerator = exp(logits)

    denominator = numerator.sum(dim, keepdim=True)
    return numerator * reciprocal(denominator)
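The max subtraction above is the standard numerical-stability trick: softmax is shift-invariant, so softmax(x - c) == softmax(x) for any constant c, while the shifted exponentials cannot overflow. A plain-PyTorch check:

import torch

t = torch.randn(3, 5)
shifted = t - t.max(dim=1, keepdim=True)[0]

assert torch.allclose(torch.softmax(shifted, dim=1), torch.softmax(t, dim=1))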
Example #10
def test_get_grad_input_padding(get_clients, common_args: List,
                                nr_parties) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    grad = torch.Tensor([[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]])
    grad_mpc = MPCTensor(secret=grad, session=session)

    input_size, stride, padding, kernel_size, dilation = common_args

    expected_padding = torch.nn.functional.grad._grad_input_padding(
        grad,
        input_size,
        (stride, stride),
        (padding, padding),
        kernel_size,
        (dilation, dilation),
    )

    args = [[el] + common_args + [session] for el in grad_mpc.share_ptrs]
    shares = parallel_execution(GradConv2d.get_grad_input_padding,
                                grad_mpc.session.parties)(args)
    grad_input_padding = MPCTensor(shares=shares, session=grad_mpc.session)
    output_padding_tensor = grad_input_padding.reconstruct()
    output_padding_tensor /= grad_mpc.session.nr_parties
    calculated_padding = tuple(output_padding_tensor.to(torch.int).tolist())

    assert calculated_padding == expected_padding
Example #11
def test_session_ring_xor(get_clients, security, bit) -> None:
    parties = get_clients(3)
    protocol = Falcon(security)
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)
    ring_size = session.ring_size
    tensor_type = session.tensor_type
    config = Config(encoder_base=1, encoder_precision=0)
    x_sh1 = torch.tensor([[927021, 3701]], dtype=tensor_type)
    x_sh2 = torch.tensor([[805274, 401]], dtype=tensor_type)
    x_sh3 = torch.tensor([[-1732294, -4102]], dtype=tensor_type)
    bit_sh_1, bit_sh_2, bit_sh_3 = bit
    b_sh1 = torch.tensor([bit_sh_1], dtype=tensor_type)
    b_sh2 = torch.tensor([bit_sh_2], dtype=tensor_type)
    b_sh3 = torch.tensor([bit_sh_3], dtype=tensor_type)
    shares_x = [x_sh1, x_sh2, x_sh3]
    shares_b = [b_sh1, b_sh2, b_sh3]
    rst_list_x = ReplicatedSharedTensor.distribute_shares(shares=shares_x,
                                                          session=session,
                                                          ring_size=ring_size,
                                                          config=config)
    rst_list_b = ReplicatedSharedTensor.distribute_shares(shares=shares_b,
                                                          session=session,
                                                          ring_size=ring_size,
                                                          config=config)
    x = MPCTensor(shares=rst_list_x, session=session, shape=x_sh1.shape)
    b = MPCTensor(shares=rst_list_b, session=session, shape=b_sh1.shape)
    secret_x = ReplicatedSharedTensor.shares_sum(shares_x, ring_size)
    secret_b = ReplicatedSharedTensor.shares_sum(shares_b, ring_size)
    result = operator.xor(x, b)
    expected_res = secret_x ^ secret_b
    assert (result.reconstruct(decode=False) == expected_res).all()
Example #12
def test_setupmpc_nocall_exception(get_clients) -> None:
    alice_client, bob_client = get_clients(2)
    session = Session(parties=[alice_client, bob_client])

    with pytest.raises(ValueError):
        MPCTensor(secret=42, session=session)

    with pytest.raises(ValueError):
        MPCTensor(secret=torch.Tensor([1, -2]), session=session)
Example #13
def test_generate_shares_config(get_clients) -> None:
    x_secret = torch.Tensor([5.0])
    x_share = ShareTensor(data=x_secret)

    shares_from_share_tensor = MPCTensor.generate_shares(x_share, 2)
    shares_from_secret = MPCTensor.generate_shares(
        x_secret, 2, config=Config(encoder_base=2, encoder_precision=16)
    )

    assert sum(shares_from_share_tensor) == sum(shares_from_secret)
Example #14
def test_remote_mpc_with_shape(get_clients) -> None:
    alice_client, bob_client = get_clients(2)
    session = Session(parties=[alice_client, bob_client])
    SessionManager.setup_mpc(session)

    x_remote = alice_client.torch.Tensor([1, -2, 0.3])
    x = MPCTensor(secret=x_remote, shape=(1, 3), session=session)
    result = x.reconstruct()

    assert np.allclose(x_remote.get(), result, atol=1e-5)
Example #15
def test_rst_distribute_reconstruct_float_secret(get_clients, parties,
                                                 security) -> None:
    parties = get_clients(parties)
    protocol = Falcon(security)
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)

    secret = 43.2
    a = MPCTensor(secret=secret, session=session)
    assert np.allclose(secret, a.reconstruct(), atol=1e-3)
Example #16
def test_ops_different_share_class(get_clients) -> None:
    clients = get_clients(2)
    session1 = Session(parties=clients)
    session2 = Session(parties=clients, protocol=falcon)
    SessionManager.setup_mpc(session1)
    SessionManager.setup_mpc(session2)
    x = torch.tensor([1, 2, 3])
    x_share = MPCTensor(secret=x, session=session1)
    x_rst = MPCTensor(secret=x, session=session2)
    with pytest.raises(TypeError):
        x_share + x_rst
Example #17
def max_pool2d_backward(
    grad: MPCTensor,
    input_shape: Tuple[int],
    indices: MPCTensor,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Optional[Union[int, Tuple[int, int]]] = None,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
) -> MPCTensor:
    """Helper function for the backwards step for max_pool2d.

    Credits goes to the CrypTen team.

    Args:
        grad (MPCTensor): gradient that comes from the child node
        input_shape (Tuple[int]): the shape of the input when the max_pool2d was run
        indices (MPCTensor): the indices where the maximum value was found in the input
        kernel_size (Union[int, Tuple[int, int]]): the kernel size
            in case it is passed as an integer then that specific value is used for height and width
        stride (Union[int, Tuple[int, int]]): the stride size
            in case it is passed as an integer then that specific value is used for height and width
        padding (Union[int, Tuple[int, int]]): the padding size
            in case it is passed as an integer then that specific value is used for height and width
        dilation (Union[int, Tuple[int, int]]): the dilation size
            in case it is passed as an integer then that specific value is used for height and width

    Returns:
        The gradient that should be backpropagated (MPCTensor)

    Raises:
        ValueError: In case some of the values for the parameters are not supported
    """
    kernel_size, stride, padding, dilation = _sanity_check_max_pool2d(
        kernel_size, stride, padding, dilation)
    if len(grad.shape) != 4 and len(grad.shape) != 3:
        raise ValueError(
            f"Expected gradient to have 3/4 dimensions (4 with batch). Found {len(grad.shape)}"
        )

    if len(indices.shape) != len(grad.shape) + 2:
        raise ValueError(
            "Expected indices shape to have 2 extra dimensions because of "
            f"(kernel_size, kernel_size), but has {len(indices.shape)}")

    session = grad.session

    mappings = grad.view(grad.shape + (1, 1)) * indices
    args = [[tuple(input_shape), grads_share, kernel_size, stride, padding]
            for grads_share in mappings.share_ptrs]
    shares = parallel_execution(max_pool2d_backward_helper,
                                session.parties)(args)

    res = MPCTensor(shares=shares, shape=input_shape, session=session)
    return res
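The mapping step above relies on `indices` being a one-hot encoding of each pooling window's argmax, with one extra (kernel_h, kernel_w) pair of dimensions. A plain-PyTorch sketch of how the broadcasted multiply routes each output gradient to the position that produced the maximum:

import torch

grad = torch.tensor([[2.0, 3.0]])            # shape (1, 2): two pooling windows
indices = torch.tensor([[[[0.0, 1.0],        # window 0: max was at (0, 1)
                          [0.0, 0.0]],
                         [[0.0, 0.0],        # window 1: max was at (1, 0)
                          [1.0, 0.0]]]])     # shape (1, 2, 2, 2)

mappings = grad.view(grad.shape + (1, 1)) * indices

assert torch.equal(mappings[0, 0], torch.tensor([[0.0, 2.0], [0.0, 0.0]]))
assert torch.equal(mappings[0, 1], torch.tensor([[0.0, 0.0], [3.0, 0.0]]))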
Example #18
    def share_state_dict(
        self,
        state_dict: Dict[str, Any],
        additional_attributes: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Share the parameters of the normal Conv2d layer.

        Args:
            state_dict (Dict[str, Any]): the state dict that would be shared.
            additional_attributes (Dict[str, Any]): Attributes of conv apart from weights.

        """
        bias = None
        if ispointer(state_dict):
            weight = state_dict["weight"].resolve_pointer_type()
            if "bias" in weight.client.python.List(state_dict).get():
                bias = state_dict["bias"].resolve_pointer_type()
            shape = weight.client.python.Tuple(weight.shape)
            shape = shape.get()
        else:
            weight = state_dict["weight"]
            bias = state_dict.get("bias")
            shape = state_dict["weight"].shape

        if ispointer(additional_attributes):
            self.set_additional_attributes(
                additional_attributes.get().resolve_pointer_type())
        else:
            self.set_additional_attributes(additional_attributes)

        # Weight shape (out_channel, in_channels/groups, kernel_size_w, kernel_size_h)
        # we have groups == 1

        (
            self.out_channels,
            self.in_channels,
            kernel_size_w,
            kernel_size_h,
        ) = shape

        self.kernel_size = (kernel_size_w, kernel_size_h)
        self.weight = MPCTensor(secret=weight,
                                session=self.session,
                                shape=shape)
        self._parameters = OrderedDict({"weight": self.weight})

        if bias is not None:
            self.bias = MPCTensor(secret=bias,
                                  session=self.session,
                                  shape=(self.out_channels, ))
            self._parameters["bias"] = self.bias
Example #19
def test_select_shares_exception_shape(get_clients) -> None:
    parties = get_clients(3)
    falcon = Falcon()
    session = Session(parties=parties, protocol=falcon)
    SessionManager.setup_mpc(session)
    val = MPCTensor(secret=1, session=session)
    rst = val.share_ptrs[0].get_copy()
    rst.ring_size = 2
    val.share_ptrs[0] = rst.send(parties[0])
    val.shape = None
    with pytest.raises(ValueError):
        Falcon.select_shares(val, val, val)
Example #20
def test_rst_distribute_reconstruct_tensor_secret(get_clients, parties,
                                                  security) -> None:
    parties = get_clients(parties)
    protocol = Falcon(security)
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)

    secret = torch.Tensor([[1, -2.0, 0.0], [3.9, -4.394, -0.9],
                           [-43, 100, -0.4343], [1.344, -5.0, 0.55]])

    a = MPCTensor(secret=secret, session=session)
    assert np.allclose(secret, a.reconstruct(), atol=1e-3)
Example #21
def test_generate_shares_session(get_clients) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    x_secret = torch.Tensor([5.0])
    x_share = ShareTensor(data=x_secret, session=session)

    shares_from_share_tensor = MPCTensor.generate_shares(x_share, 2)
    shares_from_secret = MPCTensor.generate_shares(x_secret, 2, session=session)

    assert sum(shares_from_share_tensor) == sum(shares_from_secret)
Example #22
def test_op_mpc_different_sessions(get_clients) -> None:
    clients = get_clients(2)
    session_one = Session(parties=clients)
    session_two = Session(parties=clients)
    SessionManager.setup_mpc(session_one)
    SessionManager.setup_mpc(session_two)

    x = MPCTensor(secret=torch.Tensor([1, -2]), session=session_one)
    y = MPCTensor(secret=torch.Tensor([1, -2]), session=session_two)

    with pytest.raises(ValueError):
        x + y
Example #23
def test_invalid_malicious_reconstruction(get_clients, parties):
    parties = get_clients(parties)
    protocol = Falcon("malicious")
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)

    secret = 42.32

    tensor = MPCTensor(secret=secret, session=session)
    tensor.share_ptrs[0][0] = tensor.share_ptrs[0][0] + 4

    with pytest.raises(ValueError):
        tensor.reconstruct()
Example #24
def test_rst_reconstruct_zero_share_ptrs(get_clients, security) -> None:
    parties = get_clients(3)
    protocol = Falcon(security)
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)

    secret = torch.Tensor([[1, -2.0, 0.0], [3.9, -4.394, -0.9],
                           [-43, 100, -0.4343], [1.344, -5.0, 0.55]])

    a = MPCTensor(secret=secret, session=session)
    a.share_ptrs = []
    with pytest.raises(ValueError):
        a.reconstruct()
Example #25
def mul_master(x: MPCTensor, y: MPCTensor, op_str: str) -> MPCTensor:
    """Function that is executed by the orchestrator to multiply two secret values

    :return: a new set of shares that represents the multiplication
           between two secret values
    :rtype: MPCTensor
    """

    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    session = x.session

    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    primitives = CryptoPrimitiveProvider.generate_primitives(
        f"beaver_{op_str}",
        sessions=session.session_ptrs,
        g_kwargs={
            "a_shape": shape_x,
            "b_shape": shape_y,
            "nr_parties": session.nr_parties,
        },
        p_kwargs={
            "a_shape": shape_x,
            "b_shape": shape_y
        },
    )

    a_sh, b_sh, c_sh = primitives[0]

    a_mpc = MPCTensor(shares=a_sh, shape=x.shape, session=session)
    b_mpc = MPCTensor(shares=b_sh, shape=y.shape, session=session)

    eps = x - a_mpc
    delta = y - b_mpc

    eps_plaintext = eps.reconstruct(decode=False)
    delta_plaintext = delta.reconstruct(decode=False)

    # Arguments that must be sent to all parties
    common_args = [eps_plaintext, delta_plaintext, op_str]

    # Specific arguments to each party
    args = [[el] + common_args for el in session.session_ptrs]

    shares = parallel_execution(mul_parties, session.parties)(args)
    result = MPCTensor(shares=shares, shape=c_sh[0].shape, session=session)

    return result
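At the share level, the reconstruction that mul_parties is assumed to perform is purely local: each party i outputs c_i + eps * b_i + delta * a_i, and one designated party additionally adds the public eps * delta. A plaintext sketch for two parties and the elementwise "mul" case:

import torch

x, y = torch.tensor([3]), torch.tensor([-4])
a, b = torch.tensor([11]), torch.tensor([-7])
c = a * b
eps, delta = x - a, y - b          # masked differences, safe to reveal

a0, b0, c0 = torch.tensor([5]), torch.tensor([2]), torch.tensor([9])
a1, b1, c1 = a - a0, b - b0, c - c0

z0 = c0 + eps * b0 + delta * a0 + eps * delta   # designated party
z1 = c1 + eps * b1 + delta * a1

assert torch.equal(z0 + z1, x * y)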
Example #26
def test_mul_private_exception_nothreeparties(get_clients, parties):
    parties = get_clients(parties)
    protocol = Falcon("semi-honest")
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)

    secret1 = torch.tensor([[-100, 20, 30], [-90, 1000, 1], [1032, -323, 15]])
    secret2 = 8

    tensor1 = MPCTensor(secret=secret1, session=session)
    tensor2 = MPCTensor(secret=secret2, session=session)

    with pytest.raises(ValueError):
        tensor1 * tensor2
Example #27
def test_sort_invalidim_exception(get_clients):
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    x = MPCTensor(secret=torch.tensor([1]), session=session)
    y = MPCTensor(secret=torch.tensor([3]), session=session)
    z = MPCTensor(secret=torch.tensor([6, 2]), session=session)

    mpctensor_list = [x, y, z]

    with pytest.raises(ValueError):
        sort(mpctensor_list)
Example #28
def test_invalid_malicious_reconstruction(get_clients, parties):
    parties = get_clients(parties)
    protocol = Falcon("malicious")
    session = Session(protocol=protocol, parties=parties)
    SessionManager.setup_mpc(session)

    secret = torch.Tensor([[1, -2.0, 0.0], [3.9, -4.394, -0.9],
                           [-43, 100, -0.4343], [1.344, -5.0, 0.55]])

    tensor = MPCTensor(secret=secret, session=session)
    tensor.share_ptrs[0][0] = tensor.share_ptrs[0][0] + 4

    with pytest.raises(ValueError):
        tensor.reconstruct()
Example #29
def public_divide(x: MPCTensor, y: Union[torch.Tensor, int]) -> MPCTensor:
    """Function that is executed by the orchestrator to divide a secret by a public value.

    Args:
        x (MPCTensor): Private numerator.
        y (Union[torch.Tensor, int]): Public denominator.

    Returns:
        MPCTensor: A new set of shares that represents the division.
    """
    session = x.session
    res_shape = x.shape

    if session.nr_parties == 2:
        shares = [operator.truediv(share, y) for share in x.share_ptrs]
        return MPCTensor(shares=shares, session=session, shape=res_shape)

    primitives = CryptoPrimitiveProvider.generate_primitives(
        "beaver_wraps",
        session=session,
        g_kwargs={
            "nr_parties": session.nr_parties,
            "shape": res_shape
        },
        p_kwargs=None,
    )

    r_sh, theta_r_sh = list(zip(*list(zip(*primitives))[0]))

    r_mpc = MPCTensor(shares=r_sh, session=session, shape=x.shape)

    z = r_mpc + x
    z_shares_local = z.get_shares()

    common_args = [z_shares_local, y]
    args = zip(
        r_mpc.share_ptrs,
        theta_r_sh,
        x.share_ptrs,
    )
    args = [list(el) + common_args for el in args]

    theta_x = parallel_execution(div_wraps, session.parties)(args)
    theta_x_plaintext = MPCTensor(shares=theta_x,
                                  session=session).reconstruct()

    res = x - theta_x_plaintext * 4 * ((session.ring_size // 4) // y)

    return res
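The theta correction above compensates for wrap-around: with additive shares over a ring, the integer sum of shares can exceed the ring size, and dividing each share by y then loses a multiple of ring_size // y. A simplified plaintext sketch of that effect (the real protocol keeps theta secret-shared and uses a slightly different correction constant):

ring = 2 ** 16
x, y = 1000, 10

x0 = 60000
x1 = (x - x0) % ring          # the two shares wrap once: x0 + x1 == x + ring

naive = x0 // y + x1 // y     # per-share division, off by theta * (ring // y)
theta = (x0 + x1) // ring     # number of wrap-arounds (here 1)

assert abs(naive - theta * (ring // y) - x // y) <= 2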
Example #30
def test_mse_loss(get_clients) -> None:
    clients = get_clients(4)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    y_secret = torch.Tensor([0.23, 0.32, 0.2, 0.3])
    y_mpc = MPCTensor(secret=y_secret, session=session)

    y_pred = torch.Tensor([0.1, 0.3, 0.4, 0.2])
    y_pred_mpc = MPCTensor(secret=y_pred, session=session)

    res = mse_loss(y_mpc, y_pred_mpc)
    res_expected = torch.nn.functional.mse_loss(y_secret, y_pred, reduction="sum")

    assert np.allclose(res.reconstruct(), res_expected, atol=1e-4)