Example #1
def fss_op(x1: MPCTensor, x2: MPCTensor, op="eq") -> MPCTensor:
    """Define the workflow for a binary operation using Function Secret Sharing.

    Currently supported operands are == and <=, corresponding to
    op = 'eq' and op = 'comp' respectively.

    Args:
        x1 (MPCTensor): First private value.
        x2 (MPCTensor): Second private value.
        op: Type of operation to perform, should be 'eq' or 'comp'. Defaults to eq.

    Returns:
        MPCTensor: Shares of the comparison.
    """
    assert not th.cuda.is_available()  # nosec

    # FIXME: Better handle the case where x1 or x2 is not a MPCTensor. For the moment
    # FIXME: we cast it into a MPCTensor at the expense of extra communication
    session = x1.session
    dtype = session.tensor_type

    shape = MPCTensor._get_shape("sub", x1.shape, x2.shape)
    n_values = shape.numel()

    CryptoPrimitiveProvider.generate_primitives(
        f"fss_{op}",
        sessions=session.session_ptrs,
        g_kwargs={"n_values": n_values},
        p_kwargs={},
    )

    args = zip(session.session_ptrs, x1.share_ptrs, x2.share_ptrs)
    args = [list(el) + [op] for el in args]

    shares = parallel_execution(mask_builder, session.parties)(args)

    # TODO: don't do .reconstruct(), this should be done remotely between the evaluators
    mask_value = MPCTensor(shares=shares, session=session)
    mask_value = mask_value.reconstruct(decode=False) % 2**n

    # TODO: add dtype to args
    args = [(session.session_ptrs[i], th.IntTensor([i]), mask_value, op)
            for i in range(2)]

    shares = parallel_execution(evaluate, session.parties)(args)

    response = MPCTensor(session=session, shares=shares, shape=shape)
    response.shape = shape
    return response
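
A minimal plaintext sketch of the masking step above (toy values in plain Python, not the sympc API): each evaluator holds an additive share of x1 - x2 plus a share of a random mask r, and only the masked difference is ever made public.

import random

n = 32                      # assumed ring bit length, standing in for the module-level `n`
ring = 2 ** n

x1, x2 = 7, 7               # secrets being compared for equality
r = random.randrange(ring)  # random mask tied to the FSS keys

# additive shares of (x1 - x2 + r) held by the two evaluators
s0 = random.randrange(ring)
s1 = (x1 - x2 + r - s0) % ring

masked = (s0 + s1) % ring   # the public value that `mask_value` corresponds to
assert masked == (x1 - x2 + r) % ring
# The FSS keys are generated around r, so evaluating them at `masked` yields
# shares of the comparison result without revealing x1 - x2 itself.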
Example #2
    def __reconstruct_semi_honest(
        share_ptrs: List["ReplicatedSharedTensor"],
        get_shares: bool = False,
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        """Reconstruct value from shares.

        Args:
            share_ptrs (List[ReplicatedSharedTensor]): List of RSTensor pointers.
            get_shares (bool): Retrieve only shares.

        Returns:
            reconstructed_value (torch.Tensor): Reconstructed value.
        """
        request = ReplicatedSharedTensor._request_and_get
        request_wrap = parallel_execution(request)
        args = [[share] for share in share_ptrs[:2]]
        local_shares = request_wrap(args)

        shares = [local_shares[0].shares[0]]
        shares.extend(local_shares[1].shares)

        if get_shares:
            return shares

        ring_size = local_shares[0].ring_size

        return ReplicatedSharedTensor.shares_sum(shares, ring_size)
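
What shares_sum boils down to for a 2-out-of-3 replicated sharing, sketched in plain PyTorch (toy values, not the sympc API): the secret is the sum of the three distinct shares modulo the ring size, and the first share of party 0 together with both shares of party 1 covers all three.

import torch

ring_size = 2 ** 32
secret = torch.tensor([42])

s1 = torch.randint(0, ring_size, secret.shape)
s2 = torch.randint(0, ring_size, secret.shape)
s3 = (secret - s1 - s2) % ring_size

# party 0 holds (s1, s2), party 1 holds (s2, s3); taking s1 from party 0 and
# both shares from party 1 yields each of {s1, s2, s3} exactly once
reconstructed = (s1 + s2 + s3) % ring_size
assert torch.equal(reconstructed, secret)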
Example #3
def _reshape_max_pool2d(
    x: MPCTensor,
    kernel_size: Tuple[int, int],
    stride: Tuple[int, int],
    padding: Tuple[int, int],
    dilation: Tuple[int, int],
) -> MPCTensor:
    """Prepare the share tensors by calling the reshape function in parallel at each party.

    Args:
        x (MPCTensor): the MPCTensor on which to apply the reshape operation
        kernel_size (Tuple[int, int]): the kernel size
        stride (Tuple[int, int]): the stride size
        padding (Tuple[int, int]): the padding size
        dilation (Tuple[int, int]): the dilation size

    Returns:
        The reshaped MPCTensor.
    """
    session = x.session

    args = [[share, kernel_size, stride, padding, dilation]
            for share in x.share_ptrs]
    shares = parallel_execution(helper_max_pool2d_reshape,
                                session.parties)(args)

    res_shape = shares[0].shape.get()
    res = MPCTensor(shares=shares, session=session, shape=res_shape)
    return res
Example #4
def test_get_grad_input_padding(get_clients, common_args: List,
                                nr_parties) -> None:
    clients = get_clients(2)
    session = Session(parties=clients)
    SessionManager.setup_mpc(session)

    grad = torch.Tensor([[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0,
                                                              1.0]]]])
    grad_mpc = MPCTensor(secret=grad, session=session)

    input_size, stride, padding, kernel_size, dilation = common_args

    expected_padding = torch.nn.functional.grad._grad_input_padding(
        grad,
        input_size,
        (stride, stride),
        (padding, padding),
        kernel_size,
        (dilation, dilation),
    )

    args = [[el] + common_args + [session] for el in grad_mpc.share_ptrs]
    shares = parallel_execution(GradConv2d.get_grad_input_padding,
                                grad_mpc.session.parties)(args)
    grad_input_padding = MPCTensor(shares=shares, session=grad_mpc.session)
    output_padding_tensor = grad_input_padding.reconstruct()
    output_padding_tensor /= grad_mpc.session.nr_parties
    calculated_padding = tuple(output_padding_tensor.to(torch.int).tolist())

    assert calculated_padding == expected_padding
Example #5
def stack(tensors: List, dim: int = 0) -> MPCTensor:
    """Concatenates a sequence of tensors along a new dimension.

    Args:
        tensors (List): sequence of tensors to stack
        dim (int): dimension to insert. Has to be between 0 and the number of
            dimensions of concatenated tensors (inclusive)

    Returns:
        MPCTensor: calculated MPCTensor
    """
    session = tensors[0].session

    args = list(
        zip([str(uuid) for uuid in session.rank_to_uuid.values()],
            *[tensor.share_ptrs for tensor in tensors]))

    stack_shares = parallel_execution(stack_share_tensor,
                                      session.parties)(args)
    from sympc.tensor import MPCTensor

    expected_shape = torch.stack(
        [torch.empty(each_tensor.shape) for each_tensor in tensors],
        dim=dim).shape
    result = MPCTensor(shares=stack_shares,
                       session=session,
                       shape=expected_shape)

    return result
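
The expected output shape is computed locally from metadata only: stacking empty tensors with the right shapes never touches the secret values. A quick plain-PyTorch illustration with assumed toy shapes:

import torch

shapes = [(2, 3), (2, 3)]
expected_shape = torch.stack([torch.empty(s) for s in shapes], dim=0).shape
assert expected_shape == torch.Size([2, 2, 3])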
Example #6
def cat(tensors: List, dim: int = 0) -> MPCTensor:
    """Concatenates the given sequence of seq tensors in the given dimension.

    Args:
        tensors (List): sequence of tensors to concatenate
        dim (int): the dimension over which the tensors are concatenated

    Returns:
        MPCTensor: calculated MPCTensor
    """
    session = tensors[0].session

    args = list(
        zip([str(uuid) for uuid in session.rank_to_uuid.values()],
            *[tensor.share_ptrs for tensor in tensors]))

    stack_shares = parallel_execution(cat_share_tensor, session.parties)(args)
    from sympc.tensor import MPCTensor

    expected_shape = torch.cat(
        [torch.empty(each_tensor.shape) for each_tensor in tensors],
        dim=dim).shape
    result = MPCTensor(shares=stack_shares,
                       session=session,
                       shape=expected_shape)

    return result
Example #7
def mul_master(x: MPCTensor, y: MPCTensor, op_str: str) -> List[ShareTensor]:
    """Function that is executed by the orchestrator to multiply two secret values

    :return: a new set of shares that represents the multiplication
           between two secret values
    :rtype: MPCTensor
    """

    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    a_sh, b_sh, c_sh = beaver.build_triples(x, y, op_str)
    eps = x - a_sh
    delta = y - b_sh
    session = x.session
    nr_parties = len(session.session_ptrs)

    eps_plaintext = eps.reconstruct(decode=False)
    delta_plaintext = delta.reconstruct(decode=False)

    # Arguments that must be sent to all parties
    common_args = [eps_plaintext, delta_plaintext, op_str]

    # Specific arguments to each party
    args = zip(session.session_ptrs, a_sh.share_ptrs, b_sh.share_ptrs, c_sh.share_ptrs)
    args = [list(el) + common_args for el in args]

    shares = parallel_execution(mul_parties, session.parties)(args)
    return shares
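
The algebra behind mul_parties is the standard Beaver-triple identity; a plaintext sketch with toy values (plain Python, not the sympc API) shows why opening eps and delta is enough for the parties to assemble the product locally.

import random

ring = 2 ** 32
x, y = 12, 34
a = random.randrange(ring)
b = random.randrange(ring)
c = (a * b) % ring          # the triple satisfies c = a * b

eps = (x - a) % ring        # opened in the clear, reveals nothing about x on its own
delta = (y - b) % ring      # opened in the clear, reveals nothing about y on its own

xy = (c + eps * b + delta * a + eps * delta) % ring
assert xy == (x * y) % ring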
Example #8
    def reconstruct(
            self,
            decode: bool = True,
            get_shares: bool = False
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        """Request and get the shares from all the parties and reconstruct the
        secret. Depending on the value of "decode", the secret is decoded
        or not using the FixedPointEncoder specific to the session.

        Args:
            decode (bool): True if decoding using the FixedPointEncoder. Defaults to True.
            get_shares (bool): True if only the shares should be returned. Defaults to False.

        Returns:
            torch.Tensor. The secret reconstructed.
        """
        def _request_and_get(share_ptr: ShareTensor) -> ShareTensor:
            """Function used to request and get a share - Duet Setup

            Args:
                share_ptr (ShareTensor): a ShareTensor

            Returns:
                ShareTensor. The ShareTensor in local.

            """

            if not islocal(share_ptr):
                share_ptr.request(name="reconstruct", block=True)
            res = share_ptr.get_copy()
            return res

        request = _request_and_get

        request_wrap = parallel_execution(request)

        args = [[share] for share in self.share_ptrs]
        local_shares = request_wrap(args)

        tensor_type = self.session.tensor_type

        shares = [share.tensor for share in local_shares]

        if get_shares:
            return shares

        plaintext = sum(shares)

        if decode:
            fp_encoder = FixedPointEncoder(
                base=self.session.config.encoder_base,
                precision=self.session.config.encoder_precision,
            )

            plaintext = fp_encoder.decode(plaintext)

        return plaintext
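
A short plain-PyTorch sketch of what the FixedPointEncoder step amounts to (assumed base and precision, not necessarily the session defaults): secrets are stored as integers scaled by base**precision, and decoding divides the scale back out.

import torch

base, precision = 2, 16
scale = base ** precision

secret = torch.tensor([1.5, -2.25])
encoded = (secret * scale).long()      # the integer representation the shares sum to
decoded = encoded.float() / scale      # the decoded plaintext, as with decode=True
assert torch.allclose(decoded, secret)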
Example #9
def max_pool2d_backward(
    grad: MPCTensor,
    input_shape: Tuple[int],
    indices: MPCTensor,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Optional[Union[int, Tuple[int, int]]] = None,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
) -> MPCTensor:
    """Helper function for the backwards step for max_pool2d.

    Credit goes to the CrypTen team.

    Args:
        grad (MPCTensor): gradient that comes from the child node
        input_shape (Tuple[int]): the shape of the input when the max_pool2d was run
        indices (MPCTensor): the indices where the maximum value was found in the input
        kernel_size (Union[int, Tuple[int, int]]): the kernel size
            in case it is passed as an integer then that specific value is used for height and width
        stride (Union[int, Tuple[int, int]]): the stride size
            in case it is passed as an integer then that specific value is used for height and width
        padding (Union[int, Tuple[int, int]]): the padding size
            in case it is passed as an integer then that specific value is used for height and width
        dilation (Union[int, Tuple[int, int]]): the dilation size
            in case it is passed as an integer then that specific value is used for height and width

    Returns:
        The gradient that should be backpropagated (MPCTensor)

    Raises:
        ValueError: In case some of the values for the parameters are not supported
    """
    kernel_size, stride, padding, dilation = _sanity_check_max_pool2d(
        kernel_size, stride, padding, dilation)
    if len(grad.shape) != 4 and len(grad.shape) != 3:
        raise ValueError(
            f"Expected gradient to have 3/4 dimensions (4 with batch). Found {len(grad.shape)}"
        )

    if len(indices.shape) != len(grad.shape) + 2:
        raise ValueError(
            "Expected indices shape to have 2 extra dimensions because of "
            f"(kernel_size, kernel_size), but has {len(indices.shape)}")

    session = grad.session

    mappings = grad.view(grad.shape + (1, 1)) * indices
    args = [[tuple(input_shape), grads_share, kernel_size, stride, padding]
            for grads_share in mappings.share_ptrs]
    shares = parallel_execution(max_pool2d_backward_helper,
                                session.parties)(args)

    res = MPCTensor(shares=shares, shape=input_shape, session=session)
    return res
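
A plaintext analogue of this backward step in plain PyTorch (not sympc): the incoming gradient is routed back to the positions that produced each window's maximum, which is what multiplying grad by the one-hot indices achieves on shares.

import torch
import torch.nn.functional as F

x = torch.arange(16.0).reshape(1, 1, 4, 4)
out, idx = F.max_pool2d(x, kernel_size=2, return_indices=True)

grad_out = torch.ones_like(out)
grad_in = F.max_unpool2d(grad_out, idx, kernel_size=2, output_size=x.shape[2:])
# grad_in is zero everywhere except at the argmax position of each pooling window
assert grad_in.sum() == out.numel()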
Example #10
def mul_master(x: MPCTensor, y: MPCTensor, op_str: str) -> MPCTensor:
    """Function that is executed by the orchestrator to multiply two secret values

    :return: a new set of shares that represents the multiplication
           between two secret values
    :rtype: MPCTensor
    """

    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    session = x.session

    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    primitives = CryptoPrimitiveProvider.generate_primitives(
        f"beaver_{op_str}",
        sessions=session.session_ptrs,
        g_kwargs={
            "a_shape": shape_x,
            "b_shape": shape_y,
            "nr_parties": session.nr_parties,
        },
        p_kwargs={
            "a_shape": shape_x,
            "b_shape": shape_y
        },
    )

    a_sh, b_sh, c_sh = primitives[0]

    a_mpc = MPCTensor(shares=a_sh, shape=x.shape, session=session)
    b_mpc = MPCTensor(shares=b_sh, shape=y.shape, session=session)

    eps = x - a_mpc
    delta = y - b_mpc

    eps_plaintext = eps.reconstruct(decode=False)
    delta_plaintext = delta.reconstruct(decode=False)

    # Arguments that must be sent to all parties
    common_args = [eps_plaintext, delta_plaintext, op_str]

    # Specific arguments to each party
    args = [[el] + common_args for el in session.session_ptrs]

    shares = parallel_execution(mul_parties, session.parties)(args)
    result = MPCTensor(shares=shares, shape=c_sh[0].shape, session=session)

    return result
Example #11
def public_divide(x: MPCTensor, y: Union[torch.Tensor, int]) -> MPCTensor:
    """Function that is executed by the orchestrator to divide a secret by a public value.

    Args:
        x (MPCTensor): Private numerator.
        y (Union[torch.Tensor, int]): Public denominator.

    Returns:
        MPCTensor: A new set of shares that represents the division.
    """
    session = x.session
    res_shape = x.shape

    if session.nr_parties == 2:
        shares = [operator.truediv(share, y) for share in x.share_ptrs]
        return MPCTensor(shares=shares, session=session, shape=res_shape)

    primitives = CryptoPrimitiveProvider.generate_primitives(
        "beaver_wraps",
        session=session,
        g_kwargs={
            "nr_parties": session.nr_parties,
            "shape": res_shape
        },
        p_kwargs=None,
    )

    r_sh, theta_r_sh = list(zip(*list(zip(*primitives))[0]))

    r_mpc = MPCTensor(shares=r_sh, session=session, shape=x.shape)

    z = r_mpc + x
    z_shares_local = z.get_shares()

    common_args = [z_shares_local, y]
    args = zip(
        r_mpc.share_ptrs,
        theta_r_sh,
        x.share_ptrs,
    )
    args = [list(el) + common_args for el in args]

    theta_x = parallel_execution(div_wraps, session.parties)(args)
    theta_x_plaintext = MPCTensor(shares=theta_x,
                                  session=session).reconstruct()

    res = x - theta_x_plaintext * 4 * ((session.ring_size // 4) // y)

    return res
Example #12
    def reconstruct(
        share_ptrs: List["ShareTensor"],
        get_shares=False,
        security_type: str = "semi-honest",
    ) -> torch.Tensor:
        """Reconstruct original value from shares.

        Args:
            share_ptrs (List[ShareTensor]): List of sharetensors.
            get_shares (boolean): retrieve shares or reconstructed value.
            security_type (str): Type of security by protocol.

        Returns:
            plaintext/shares (torch.Tensor/List[torch.Tensors]): Plaintext or list of shares.

        """
        def _request_and_get(share_ptr: ShareTensor) -> ShareTensor:
            """Function used to request and get a share - Duet Setup.

            Args:
                share_ptr (ShareTensor): a ShareTensor

            Returns:
                ShareTensor. The ShareTensor in local.

            """
            if not ispointer(share_ptr):
                return share_ptr
            if not islocal(share_ptr):
                share_ptr.request(block=True)

            res = share_ptr.get_copy()
            return res

        request = _request_and_get
        request_wrap = parallel_execution(request)

        args = [[share] for share in share_ptrs]
        local_shares = request_wrap(args)

        shares = [share.tensor for share in local_shares]

        if get_shares:
            return shares

        plaintext = sum(shares)

        return plaintext
Example #13
    def bit_injection(x: MPCTensor, session: Session,
                      ring_size: int) -> MPCTensor:
        """Perform ABY3 bit injection for conversion of binary share to arithmetic share.

        Args:
            x (MPCTensor) : MPCTensor with shares of bit.
            session (Session): session the share belongs to.
            ring_size (int) : Ring size of arithmetic share to convert.

        Returns:
            arith_share (MPCTensor): Arithmetic shares of bit in input ring size.

        Raises:
            ValueError: If input tensor is not binary shared.
            ValueError: If exactly three parties are not involved in the computation.
        """
        input_ring = int(
            x.share_ptrs[0].get_ring_size().get_copy())  # input ring_size
        if input_ring != 2:
            raise ValueError("Bit injection works only for binary rings")

        if session.nr_parties != NR_PARTIES:
            raise ValueError("ABY3 bit_injection requires 3 parties")

        args = [[share, str(ring_size)] for share in x.share_ptrs]

        decomposed_shares = parallel_execution(ABY3.local_decomposition,
                                               session.parties)(args)

        # Using zip for grouping on pointers is compute intensive.
        x1_share = []
        x2_share = []
        x3_share = []

        for share in list(
                map(lambda x: x[0].resolve_pointer_type(), decomposed_shares)):
            x1_share.append(share[0].resolve_pointer_type())
            x2_share.append(share[1].resolve_pointer_type())
            x3_share.append(share[2].resolve_pointer_type())

        x1 = MPCTensor(shares=x1_share, session=session, shape=x.shape)
        x2 = MPCTensor(shares=x2_share, session=session, shape=x.shape)
        x3 = MPCTensor(shares=x3_share, session=session, shape=x.shape)

        arith_share = x1 ^ x2 ^ x3

        return arith_share
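
For single bits, XOR can be expressed with ring arithmetic as a ^ b = a + b - 2*a*b, which is what allows the `^` above to be evaluated on arithmetic shares. A quick plaintext check of that identity:

for a in (0, 1):
    for b in (0, 1):
        assert (a ^ b) == a + b - 2 * a * b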
Example #14
    def mul_semi_honest(
        x: MPCTensor,
        y: MPCTensor,
        session: Session,
        op_str: str,
        ring_size: int,
        config: Config,
        reshare: bool = False,
        **kwargs_: Dict[Any, Any],
    ) -> MPCTensor:
        """Falcon semihonest multiplication.

        Performs Falcon's mul implementation, add masks and performs resharing.

        Args:
            x (MPCTensor): Secret
            y (MPCTensor): Another secret
            session (Session): Session the tensors belong to
            op_str (str): Operation string.
            ring_size (int) : Ring size of the underlying tensors.
            config (Config): The configuration (base, precision) of the underlying tensor.
            reshare (bool) : Convert 3-out-of-3 to 2-out-of-3 sharing if set.
            kwargs_ (Dict[Any, Any]): Kwargs for some operations like conv2d

        Returns:
            MPCTensor: Result of the operation.
        """
        args = [[x_share, y_share, op_str]
                for x_share, y_share in zip(x.share_ptrs, y.share_ptrs)]

        z_shares_ptrs = parallel_execution(Falcon.compute_zvalue_and_add_mask,
                                           session.parties)(args, kwargs_)

        result = MPCTensor(shares=z_shares_ptrs, session=x.session)

        if reshare:
            z_shares = [share.get() for share in z_shares_ptrs]

            # Convert 3-3 shares to 2-3 shares by resharing
            reshared_shares = ReplicatedSharedTensor.distribute_shares(
                z_shares, x.session, ring_size, config)
            result = MPCTensor(shares=reshared_shares, session=x.session)

        result.shape = MPCTensor._get_shape(op_str, x.shape,
                                            y.shape)  # for prrs
        return result
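
A toy sketch (plain Python, not the sympc API) of the 3-out-of-3 to 2-out-of-3 conversion performed by distribute_shares: after resharing, party i holds the pair (z_i, z_{i+1}), so any two parties can reconstruct z on their own.

ring = 2 ** 32
z_shares = [123, 456, 789]                       # 3-out-of-3 additive shares
z = sum(z_shares) % ring

replicated = [(z_shares[i], z_shares[(i + 1) % 3]) for i in range(3)]

# parties 0 and 1 jointly hold {z_0, z_1} and {z_1, z_2}, i.e. all three shares
held = set(replicated[0]) | set(replicated[1])
assert sum(held) % ring == z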
Example #15
    def __reconstruct_malicious(
        share_ptrs: List["ReplicatedSharedTensor"],
        get_shares: bool = False,
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        """Reconstruct value from shares.

        Args:
            share_ptrs (List[ReplicatedSharedTensor]): List of RSTensor pointers.
            get_shares (bool): Retrieve only shares.

        Returns:
            reconstructed_value (torch.Tensor): Reconstructed value.

        Raises:
            ValueError: When the parties' share values are not equal.
        """
        nparties = len(share_ptrs)

        # Get shares from all parties
        request = ReplicatedSharedTensor._request_and_get
        request_wrap = parallel_execution(request)
        args = [[share] for share in share_ptrs]
        local_shares = request_wrap(args)
        ring_size = local_shares[0].ring_size
        shares_sum = ReplicatedSharedTensor.shares_sum

        all_shares = [rst.shares for rst in local_shares]
        # reconstruct shares from all parties and verify
        value = None
        for party_rank in range(nparties):
            tensor = shares_sum(
                [all_shares[party_rank][0]] + all_shares[(party_rank + 1) %
                                                         (nparties)],
                ring_size,
            )

            if value is None:
                value = tensor
            elif (tensor != value).any():
                raise ValueError(
                    "Reconstruction values from all parties are not equal.")

        if get_shares:
            return all_shares

        return value
Example #16
def public_divide(x: MPCTensor, y: Union[torch.Tensor, int]) -> MPCTensor:
    """Function that is executed by the orchestrator to divide a secret by a value
    (that value is public)

    :return: a new set of shares that represents the multiplication
           between two secret values
    :rtype: MPCTensor
    """

    session = x.session
    res_shape = x.shape

    if session.nr_parties == 2:
        shares = [operator.truediv(share, y) for share in x.share_ptrs]
        return MPCTensor(shares=shares, session=session, shape=res_shape)

    primitives = CryptoPrimitiveProvider.generate_primitives(
        "beaver_wraps",
        sessions=session.session_ptrs,
        g_kwargs={
            "nr_parties": session.nr_parties,
            "shape": res_shape
        },
        p_kwargs=None,
    )

    r_sh, theta_r_sh = primitives[0]

    r_mpc = MPCTensor(shares=r_sh, session=session, shape=x.shape)

    z = r_mpc + x
    z_shares_local = z.get_shares()

    common_args = [z_shares_local, y]
    args = zip(session.session_ptrs, r_mpc.share_ptrs, theta_r_sh,
               x.share_ptrs)
    args = [list(el) + common_args for el in args]

    theta_x = parallel_execution(div_wraps, session.parties)(args)
    theta_x_plaintext = MPCTensor(shares=theta_x,
                                  session=session).reconstruct()

    res = x - theta_x_plaintext * 4 * ((session.ring_size // 4) // y)

    return res
Example #17
    def bit_decomposition(x: MPCTensor, session: Session) -> List[MPCTensor]:
        """Perform ABY3 bit decomposition for conversion of arithmetic share to binary share.

        Args:
            x (MPCTensor): Arithmetic shares of secret.
            session (Session): session the share belongs to.

        Returns:
            bin_share (List[MPCTensor]): Returns binary shares of each bit of the secret.

        TODO: Should be modified to use a parallel prefix adder when multiprocessing
        functionality is integrated; currently unused.
        """
        x1: List[MPCTensor] = []  # bit shares of shares
        x2: List[MPCTensor] = []
        x3: List[MPCTensor] = []

        args = [[share, "2", True] for share in x.share_ptrs]

        decomposed_shares = parallel_execution(ABY3.local_decomposition,
                                               session.parties)(args)

        # Initially we have List[p1, p2, p3] where p1, p2, p3 are the lists returned from the parties.
        # Each of p1,p2,p3 is List[ [x1,x2,x3] ,...] in bit length of the session ring size.
        # Each element of the list is a share of the shares for each bit.
        x_sh = itertools.starmap(zip, zip(*decomposed_shares))

        for x1_sh, x2_sh, x3_sh in x_sh:
            x1_sh = [ptr.resolve_pointer_type() for ptr in x1_sh]
            x2_sh = [ptr.resolve_pointer_type() for ptr in x2_sh]
            x3_sh = [ptr.resolve_pointer_type() for ptr in x3_sh]

            x1_m = MPCTensor(shares=x1_sh, session=session, shape=x.shape)
            x2_m = MPCTensor(shares=x2_sh, session=session, shape=x.shape)
            x3_m = MPCTensor(shares=x3_sh, session=session, shape=x.shape)

            x1.append(x1_m)
            x2.append(x2_m)
            x3.append(x3_m)

        x1_2 = ABY3.full_adder(x1, x2, session)
        bin_share = ABY3.full_adder(x1_2, x3, session)

        return bin_share
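
full_adder combines the per-bit sharings of the three arithmetic shares into binary shares of their sum, i.e. of the secret. In plaintext, the corresponding ripple-carry addition over bit lists looks as follows (a toy sketch; the TODO above is about replacing this with a parallel prefix adder to reduce rounds):

def ripple_add(a_bits, b_bits):
    # a_bits / b_bits are least-significant-bit first and of equal length
    out, carry = [], 0
    for a, b in zip(a_bits, b_bits):
        out.append(a ^ b ^ carry)
        carry = (a & b) | (carry & (a ^ b))
    return out

def to_bits(v, width=8):
    # least-significant-bit first
    return [(v >> i) & 1 for i in range(width)]

s = ripple_add(to_bits(11), to_bits(6))
assert sum(bit << i for i, bit in enumerate(s)) == (11 + 6) % 256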
Example #18
    def reconstruct(self, decode: bool = True) -> torch.Tensor:
        """Request and get the shares from all the parties and reconstruct the secret.
        Depending on the value of "decode", the secret is decoded or not using
        the FixedPointEncoder specific to the session.

        :return: the secret reconstructed
        :rtype: tensor
        """
        def _request_and_get(share_ptr: ShareTensor) -> ShareTensor:
            """Function used to request and get a share - Duet Setup
            :return: the ShareTensor (local)
            :rtype: ShareTensor
            """

            if not islocal(share_ptr):
                share_ptr.request(name="reconstruct", block=True)
            res = share_ptr.get_copy()
            return res

        request = _request_and_get

        request_wrap = parallel_execution(request)

        args = [[share] for share in self.share_ptrs]
        local_shares = request_wrap(args)

        tensor_type = self.session.tensor_type

        plaintext = sum(share.tensor for share in local_shares)

        if decode:
            fp_encoder = FixedPointEncoder(
                base=self.session.config.encoder_base,
                precision=self.session.config.encoder_precision,
            )

            plaintext = fp_encoder.decode(plaintext)

        return plaintext
Example #19
def test_local_decomposition(get_clients, security_type):
    parties = get_clients(3)
    falcon = Falcon(security_type=security_type)
    session = Session(parties=parties, protocol=falcon)
    SessionManager.setup_mpc(session)

    one = torch.tensor([1], dtype=torch.bool)
    zero = torch.tensor([0], dtype=torch.bool)
    shares = [one, one, one]
    ptr_lst = ReplicatedSharedTensor.distribute_shares(shares,
                                                       session,
                                                       ring_size=2)
    x = MPCTensor(shares=ptr_lst, session=session, shape=one.shape)
    ring_size = session.ring_size
    args = [[share, str(ring_size)] for share in x.share_ptrs]

    decompose = parallel_execution(ABY3.local_decomposition,
                                   session.parties)(args)

    x1_sh, x2_sh, x3_sh = zip(*map(lambda x: x[0], decompose))

    x1_sh = [ptr.get_copy().shares for ptr in x1_sh]
    x2_sh = [ptr.get_copy().shares for ptr in x2_sh]
    x3_sh = [ptr.get_copy().shares for ptr in x3_sh]

    tensor_type = x.session.tensor_type
    one = one.type(tensor_type)
    zero = zero.type(tensor_type)

    exp_x1 = [[one, zero], [zero, zero], [zero, one]]
    exp_x2 = [[zero, one], [one, zero], [zero, zero]]
    exp_x3 = [[zero, zero], [zero, one], [one, zero]]

    assert x1_sh == exp_x1
    assert x2_sh == exp_x2
    assert x3_sh == exp_x3
Example #20
def fss_op(x1: MPCTensor, x2: MPCTensor, op="eq") -> MPCTensor:
    """Define the workflow for a binary operation using Function Secret Sharing.

    Currently supported operands are == and <=, corresponding to
    op = 'eq' and op = 'comp' respectively.

    Args:
        x1 (MPCTensor): First private value.
        x2 (MPCTensor): Second private value.
        op: Type of operation to perform, should be 'eq' or 'comp'. Defaults to eq.

    Returns:
        MPCTensor: Shares of the comparison.
    """
    if th.cuda.is_available():
        # FSS is currently not supported on GPU.
        # https://stackoverflow.com/a/62145307/8878627

        # When the CUDA_VISIBLE_DEVICES environment variable is not set,
        # CUDA is not used even if available. Hence, we default to None
        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        warnings.warn("Temporarily disabling CUDA as FSS does not support it")
    else:
        cuda_visible_devices = None

    # FIXME: Better handle the case where x1 or x2 is not a MPCTensor. For the moment
    # FIXME: we cast it into a MPCTensor at the expense of extra communication
    session = x1.session

    shape = MPCTensor._get_shape("sub", x1.shape, x2.shape)
    n_values = shape.numel()

    CryptoPrimitiveProvider.generate_primitives(
        f"fss_{op}",
        sessions=session.session_ptrs,
        g_kwargs={"n_values": n_values},
        p_kwargs={},
    )

    args = zip(session.session_ptrs, x1.share_ptrs, x2.share_ptrs)
    args = [list(el) + [op] for el in args]

    shares = parallel_execution(mask_builder, session.parties)(args)

    # TODO: don't do .reconstruct(), this should be done remotely between the evaluators
    mask_value = MPCTensor(shares=shares, session=session)
    mask_value = mask_value.reconstruct(decode=False) % 2**n

    # TODO: add dtype to args
    args = [(session.session_ptrs[i], th.IntTensor([i]), mask_value, op)
            for i in range(2)]

    shares = parallel_execution(evaluate, session.parties)(args)

    response = MPCTensor(session=session, shares=shares, shape=shape)
    response.shape = shape

    if cuda_visible_devices is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices

    return response
Example #21
def helper_argmax(
    x: MPCTensor,
    dim: Optional[Union[int, Tuple[int]]] = None,
    keepdim: bool = False,
    one_hot: bool = False,
) -> MPCTensor:
    """Compute argmax using pairwise comparisons. Makes the number of rounds fixed, here it is 2.

    This is inspired from CrypTen.

    Args:
        x (MPCTensor): the MPCTensor on which to compute helper_argmax on
        dim (Union[int, Tuple[int]]): compute argmax over a specific dimension(s)
        keepdim (bool): when one_hot is true, keep all the dimensions of the tensor
        one_hot (bool): return the argmax as a one hot vector

    Returns:
        Given the args, it returns a one hot encoding (as an MPCTensor) or the index
        of the maximum value

    Raises:
        ValueError: In case multiple max values are found and a single index needs to be returned
    """
    # for each share in the MPCTensor
    #   run the algorithm described in the paper (helper_argmax_pairwise),
    #   which creates two matrices and subtracts them
    session = x.session

    prep_x = x.flatten() if dim is None else x
    args = [[str(uuid), share_ptr_tensor,
             dim] for uuid, share_ptr_tensor in zip(
                 session.rank_to_uuid.values(), prep_x.share_ptrs)]
    shares = parallel_execution(helper_argmax_pairwise, session.parties)(args)

    res_shape = shares[0].shape.get()
    x_pairwise = MPCTensor(shares=shares, session=x.session, shape=res_shape)

    # with the MPCTensor tensor we check what entries are positive
    # then we check what columns of M matrix have m-1 non-zero entries after comparison
    # (by summing over cols)
    pairwise_comparisons = x_pairwise >= 0

    # re-compute row_length
    _dim = -1 if dim is None else dim
    row_length = x.shape[_dim] if x.shape[_dim] > 1 else 2

    result = pairwise_comparisons.sum(0)
    result = result >= (row_length - 1)
    res_shape = res_shape[1:]  # Remove the leading dimension because of sum(0)

    if not one_hot:
        if dim is None:
            check = result * torch.Tensor(
                [i for i in range(np.prod(res_shape))])
        else:
            size = [1 for _ in range(len(res_shape))]
            size[dim] = res_shape[dim]
            check = result * torch.Tensor([i for i in range(res_shape[_dim])
                                           ]).view(size)

        if dim is not None:
            argmax = check.sum(dim=dim, keepdim=keepdim)
        else:
            argmax = check.sum()
            if (argmax >= row_length).reconstruct():
                # In case we have 2 max values, rather than returning an invalid index
                # we raise an exception
                raise ValueError("There are multiple argmax values")

        result = argmax

    return result
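
The pairwise trick in plain PyTorch (a toy sketch, not the sympc API): build every difference x[i] - x[j]; the row whose comparisons are all non-negative belongs to the maximum, which yields a one-hot argmax in a fixed number of comparison rounds.

import torch

x = torch.tensor([3.0, 7.0, 1.0, 5.0])
diffs = x.unsqueeze(1) - x.unsqueeze(0)               # diffs[i, j] = x[i] - x[j]
one_hot = ((diffs >= 0).sum(dim=1) == len(x)).long()  # row i wins iff x[i] >= every x[j]
assert one_hot.tolist() == [0, 1, 0, 0]
assert (one_hot * torch.arange(len(x))).sum() == x.argmax()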
Example #22
def mul_master(x: MPCTensor, y: MPCTensor, op_str: str,
               kwargs_: Dict[Any, Any]) -> MPCTensor:
    """Function that is executed by the orchestrator to multiply two secret values.

    Args:
        x (MPCTensor): First value to multiply with.
        y (MPCTensor): Second value to multiply with.
        op_str (str): Operation string.
        kwargs_ (dict): TODO:Add docstring.

    Raises:
        ValueError: If op_str not in EXPECTED_OPS.

    Returns:
        MPCTensor: Result of the multiplication.
    """
    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    session = x.session
    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    args = [list(el) + [op_str] for el in zip(x.share_ptrs, y.share_ptrs)]

    try:
        mask = parallel_execution(spdz_mask, session.parties)(args)
    except EmptyPrimitiveStore:
        CryptoPrimitiveProvider.generate_primitives(
            f"beaver_{op_str}",
            session=session,
            g_kwargs={
                "session": session,
                "a_shape": shape_x,
                "b_shape": shape_y,
                "nr_parties": session.nr_parties,
                **kwargs_,
            },
            p_kwargs={
                "a_shape": shape_x,
                "b_shape": shape_y
            },
        )
        mask = parallel_execution(spdz_mask, session.parties)(args)

    # zip on pointers is compute intensive
    mask_local = [mask[idx].get() for idx in range(session.nr_parties)]
    eps_shares, delta_shares = zip(*mask_local)

    eps_plaintext = ShareTensor.reconstruct(eps_shares)
    delta_plaintext = ShareTensor.reconstruct(delta_shares)

    # Specific arguments to each party
    args = [[str(remote_session_uuid), eps_plaintext, delta_plaintext, op_str]
            for remote_session_uuid in session.rank_to_uuid.values()]

    shares = parallel_execution(mul_parties, session.parties)(args, kwargs_)

    result = MPCTensor(shares=shares, session=session)

    return result
Example #23
def mul_master(x: MPCTensor, y: MPCTensor, op_str: str,
               kwargs_: Dict[Any, Any]) -> MPCTensor:
    """Function that is executed by the orchestrator to multiply two secret values.

    Args:
        x (MPCTensor): First value to multiply with.
        y (MPCTensor): Second value to multiply with.
        op_str (str): Operation string.
        kwargs_ (dict): TODO:Add docstring.

    Raises:
        ValueError: If op_str not in EXPECTED_OPS.

    Returns:
        MPCTensor: Result of the multiplication.
    """
    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    session = x.session

    shape_x = tuple(x.shape)
    shape_y = tuple(y.shape)

    CryptoPrimitiveProvider.generate_primitives(
        f"beaver_{op_str}",
        sessions=session.session_ptrs,
        g_kwargs={
            "a_shape": shape_x,
            "b_shape": shape_y,
            "nr_parties": session.nr_parties,
            **kwargs_,
        },
        p_kwargs={
            "a_shape": shape_x,
            "b_shape": shape_y
        },
    )

    args = [
        list(el) + [op_str]
        for el in zip(session.session_ptrs, x.share_ptrs, y.share_ptrs)
    ]

    mask = parallel_execution(spdz_mask, session.parties)(args)
    eps_shares, delta_shares = zip(*mask)

    eps = MPCTensor(shares=eps_shares, session=session)
    delta = MPCTensor(shares=delta_shares, session=session)

    eps_plaintext = eps.reconstruct(decode=False)
    delta_plaintext = delta.reconstruct(decode=False)

    # Arguments that must be sent to all parties
    common_args = [eps_plaintext, delta_plaintext, op_str]

    # Specific arguments to each party
    args = [[el] + common_args for el in session.session_ptrs]

    shares = parallel_execution(mul_parties, session.parties)(args, kwargs_)

    result = MPCTensor(shares=shares, session=session)

    return result
Example #24
    def mul_malicious(
        x: MPCTensor,
        y: MPCTensor,
        session: Session,
        op_str: str,
        ring_size: int,
        config: Config,
        **kwargs_: Dict[Any, Any],
    ) -> MPCTensor:
        """Falcon malicious multiplication.

        Args:
            x (MPCTensor): Secret
            y (MPCTensor): Another secret
            session (Session): Session the tensors belong to
            op_str (str): Operation string.
            ring_size (int) : Ring size of the underlying tensor.
            config (Config): The configuration(base,precision) of the underlying tensor.
            kwargs_ (Dict[Any, Any]): Kwargs for some operations like conv2d

        Returns:
            result(MPCTensor): Result of the operation.

        Raises:
            ValueError: If the shares are not valid.
        """
        shape_x = tuple(x.shape)
        shape_y = tuple(y.shape)

        result = Falcon.mul_semi_honest(x,
                                        y,
                                        session,
                                        op_str,
                                        ring_size,
                                        config,
                                        reshare=True,
                                        **kwargs_)

        args = [list(sh) + [op_str] for sh in zip(x.share_ptrs, y.share_ptrs)]
        try:
            mask = parallel_execution(Falcon.falcon_mask,
                                      session.parties)(args)
        except EmptyPrimitiveStore:
            CryptoPrimitiveProvider.generate_primitives(
                f"beaver_{op_str}",
                session=session,
                g_kwargs={
                    "session": session,
                    "a_shape": shape_x,
                    "b_shape": shape_y,
                    "nr_parties": session.nr_parties,
                    "ring_size": ring_size,
                    "config": config,
                    **kwargs_,
                },
                p_kwargs={
                    "a_shape": shape_x,
                    "b_shape": shape_y
                },
            )
            mask = parallel_execution(Falcon.falcon_mask,
                                      session.parties)(args)

        # zip on pointers is compute intensive
        mask_local = [mask[idx].get() for idx in range(session.nr_parties)]
        eps_shares, delta_shares = zip(*mask_local)

        eps_plaintext = ReplicatedSharedTensor.reconstruct(eps_shares)
        delta_plaintext = ReplicatedSharedTensor.reconstruct(delta_shares)

        args = [
            list(sh) + [eps_plaintext, delta_plaintext, op_str]
            for sh in zip(result.share_ptrs)
        ]

        triple_shares = parallel_execution(Falcon.triple_verification,
                                           session.parties)(args, kwargs_)

        triple = MPCTensor(shares=triple_shares, session=x.session)

        if (triple.reconstruct(decode=False) == 0).all():
            return result
        else:
            raise ValueError("Computation Aborted: Malicious behavior.")
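
The zero test above is a Beaver-style consistency check; in plaintext (toy values, not the Falcon API) the quantity z - (c + eps*b + delta*a + eps*delta) is zero exactly when z really equals x*y, so a tampered product does not pass verification.

ring = 2 ** 32
x, y = 12, 34
a, b = 5, 9
c = (a * b) % ring
eps, delta = (x - a) % ring, (y - b) % ring

def check(z):
    return (z - (c + eps * b + delta * a + eps * delta)) % ring

assert check((x * y) % ring) == 0        # honest multiplication passes
assert check((x * y + 1) % ring) != 0    # a manipulated result is flagged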