Example #1
def build_triples(x: MPCTensor, y: MPCTensor,
                  op_str: str) -> Tuple[MPCTensor, MPCTensor, MPCTensor]:
    """
    The Trusted Third Party (TTP) or Crypto Provider should provide these triples.
    Currently, the party that orchestrates the communication provides them.
    """
    if op_str not in EXPECTED_OPS:
        raise ValueError(f"{op_str} should be in {EXPECTED_OPS}")

    shape_x = x.shape
    shape_y = y.shape

    session = x.session

    a = ShareTensor(session=session)
    a.tensor = torch.empty(size=shape_x,
                           dtype=torch.long).random_(generator=ttp_generator)

    b = ShareTensor(session=session)
    b.tensor = torch.empty(size=shape_y,
                           dtype=torch.long).random_(generator=ttp_generator)

    cmd = getattr(operator, op_str)

    # Manually place the tensor and use the same session such that we do
    # not encode the result
    c = ShareTensor(session=session)
    c.tensor = cmd(a.tensor, b.tensor)

    a_sh = MPCTensor(secret=a, session=session)
    b_sh = MPCTensor(secret=b, session=session)
    c_sh = MPCTensor(secret=c, session=session)

    return a_sh, b_sh, c_sh
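
A minimal plain-value sketch of how the triple (a, b, c) built above is later consumed; x and y stand for hypothetical clear operands, and this is plain torch arithmetic, not the SyMPC API:

import torch

a = torch.randint(-100, 100, (2, 2))
b = torch.randint(-100, 100, (2, 2))
c = a * b                                   # the triple satisfies c = a * b

x = torch.randint(-100, 100, (2, 2))        # hypothetical secret operands
y = torch.randint(-100, 100, (2, 2))

eps = x - a                                 # opened during the protocol
delta = y - b

# Beaver reconstruction: c + eps*b + delta*a + eps*delta == x*y
result = c + eps * b + delta * a + eps * delta
assert torch.equal(result, x * y)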
Example #2
    def przs_generate_random_share(
        self, shape: Union[tuple, torch.Size], generators: List[torch.Generator]
    ) -> Any:
        """Generate a random share using the two generators that are
        held by a party.
        """

        from sympc.tensor import ShareTensor

        gen0, gen1 = generators

        current_share = generate_random_element(
            tensor_type=self.tensor_type,
            generator=gen0,
            shape=shape,
        )

        next_share = generate_random_element(
            tensor_type=self.tensor_type,
            generator=gen1,
            shape=shape,
        )

        share = ShareTensor(session=self)
        share.tensor = current_share - next_share

        return share
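
A short plain-torch sketch (an assumption about the intent, not SyMPC code) of why these PRZS shares sum to zero: party i holds the generator pair (g_i, g_{i+1 mod n}), so its share r_i - r_{i+1 mod n} telescopes away when all shares are added:

import torch

def sample(seed, shape=(2, 2)):
    # each party re-derives the same pseudo-random element from a shared seed
    g = torch.Generator().manual_seed(seed)
    return torch.randint(0, 2**16, shape, generator=g)

n_parties = 3
seeds = [11, 22, 33]   # seed i is shared between party i and party (i + 1) % n_parties

shares = [sample(seeds[i]) - sample(seeds[(i + 1) % n_parties]) for i in range(n_parties)]
assert (sum(shares) == 0).all()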
Example #3
def div_wraps(
    session: Session,
    r_share: ShareTensor,
    theta_r: ShareTensor,
    x_share: ShareTensor,
    z_shares: List[torch.Tensor],
    y: Union[torch.Tensor, int],
) -> ShareTensor:
    """
    From CrypTen
    Privately computes the number of wraparounds for a set of shares

    To do so, we note that:
        [theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]

    Where:
        [theta_x] is the wraps for a variable x
        [beta_xr] is the differential wraps for variables x and r
        [eta_xr]  is the plaintext wraps for variables x and r

    Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
    can make the assumption that [eta_xr] = 0 with high probability.
    """

    beta_xr = count_wraps([x_share.tensor, r_share.tensor])
    theta_x = ShareTensor(encoder_precision=0)
    theta_x.tensor = beta_xr - theta_r.tensor

    if session.rank == 0:
        theta_z = count_wraps(z_shares)
        theta_x.tensor += theta_z

    x_share.tensor //= y

    return theta_x
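
count_wraps is used above but not shown. A rough two-share sketch of what it is assumed to compute (an illustration, not the SyMPC/CrypTen source): the number of times the exact sum of the shares wraps around the int64 ring.

import torch

def count_wraps_two(x, y):
    s = x + y                                   # wraps modulo 2**64 in int64
    overflow = (x > 0) & (y > 0) & (s < 0)      # positive shares wrapped past the max
    underflow = (x < 0) & (y < 0) & (s > 0)     # negative shares wrapped past the min
    return overflow.long() - underflow.long()

x = torch.tensor([2**62, -(2**62) - 1], dtype=torch.long)
print(count_wraps_two(x, x))                    # tensor([ 1, -1])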
Example #4
def max_pool2d_backward_helper(
    input_shape: Tuple[int],
    grads_share: ShareTensor,
    kernel_size: Tuple[int, int],
    stride: Tuple[int, int],
    padding: Tuple[int, int],
) -> ShareTensor:
    """Helper function to compute the gradient needed to be passed to the parent node.

    Args:
        input_shape (Tuple[int]): the size of the input tensor when running max_pool2d
        grads_share (ShareTensor): the share for the output gradient specific to this party
        kernel_size (Tuple[int, int]): the kernel size
        stride (Tuple[int, int]): the stride size
        padding (Tuple[int, int]): the padding size

    Returns:
        A ShareTensor specific for the computed gradient

    Raises:
        ValueError: if the input shape (taking the padding into consideration) is smaller than the
            kernel shape
    """
    session = get_session(str(grads_share.session_uuid))

    res_shape = input_shape[:-2]
    res_shape += (input_shape[-2] + 2 * padding[0],
                  input_shape[-1] + 2 * padding[1])

    if res_shape[-2] < kernel_size[0] or res_shape[-1] < kernel_size[1]:
        raise ValueError(
            f"Kernel size ({kernel_size}) has more elements on an axis than "
            f"input shape ({res_shape}) considering padding of {padding}")

    tensor_type = session.tensor_type
    tensor = torch.zeros(res_shape, dtype=tensor_type)

    for i in range((res_shape[-2] - kernel_size[0]) // stride[0] + 1):
        row_idx = i * stride[0]
        for j in range((res_shape[-1] - kernel_size[1]) // stride[1] + 1):
            col_idx = j * stride[1]
            if len(res_shape) == 4:
                tensor[:, :, row_idx:row_idx + kernel_size[0],
                       col_idx:col_idx +
                       kernel_size[1], ] += grads_share.tensor[:, :, i, j]
            else:
                tensor[:, row_idx:row_idx + kernel_size[0], col_idx:col_idx +
                       kernel_size[1], ] += grads_share.tensor[:, i, j]

    if len(res_shape) == 4:
        tensor = tensor[:, :, padding[0]:input_shape[-2],
                        padding[1]:input_shape[-1]]
    else:
        tensor = tensor[:, padding[0]:res_shape[-2] - padding[0],
                        padding[1]:res_shape[-1] - padding[1], ]
    res = ShareTensor(config=grads_share.config)
    res.tensor = tensor

    return res
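
A tiny plain-torch sanity sketch (shapes only, no secret sharing) of the scatter pattern used above: every element of the output gradient is added into its kernel window of an input-shaped buffer, mirroring the loop in the helper:

import torch

inp_h, inp_w = 4, 4
k, s, p = (2, 2), (2, 2), (0, 0)
grads = torch.ones((1, 1, 2, 2), dtype=torch.long)   # gradient w.r.t. the pooled output
buf = torch.zeros((1, 1, inp_h + 2 * p[0], inp_w + 2 * p[1]), dtype=torch.long)

for i in range((buf.shape[-2] - k[0]) // s[0] + 1):
    for j in range((buf.shape[-1] - k[1]) // s[1] + 1):
        buf[:, :, i * s[0]:i * s[0] + k[0], j * s[1]:j * s[1] + k[1]] += grads[:, :, i, j]

print(buf)   # with stride == kernel size, every input position gets exactly one contribution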
Example #5
def proto2object(proto: ShareTensor_PB) -> ShareTensor:
    session = protobuf_session_deserializer(proto=proto.session)

    data = protobuf_tensor_deserializer(proto.tensor.tensor)
    share = ShareTensor(data=None, session=session)

    # Manually put the tensor since we do not want to re-encode it
    share.tensor = data.type(session.tensor_type)

    return share
Example #6
File: spdz.py Project: NiWaRe/SyMPC
def mul_parties(session_uuid_str: str, eps: torch.Tensor, delta: torch.Tensor,
                op_str: str, **kwargs) -> ShareTensor:
    """SPDZ Multiplication.

    Args:
        session_uuid_str (str): UUID to identify the session on each party side.
        eps (torch.Tensor): Epsilon value of the protocol.
        delta (torch.Tensor): Delta value of the protocol.
        op_str (str): Operator string.
        kwargs: Keyword arguments for the operator.

    Returns:
        ShareTensor: Shared result of the multiplication.
    """
    session = get_session(session_uuid_str)

    crypto_store = session.crypto_store
    eps_shape = tuple(eps.shape)
    delta_shape = tuple(delta.shape)

    primitives = crypto_store.get_primitives_from_store(
        f"beaver_{op_str}", eps_shape, delta_shape)

    a_share, b_share, c_share = primitives

    if op_str in ["conv2d", "conv_transpose2d"]:
        op = getattr(torch, op_str)
    else:
        op = getattr(operator, op_str)

    eps_b = op(eps, b_share.tensor, **kwargs)
    delta_a = op(a_share.tensor, delta, **kwargs)

    share_tensor = c_share.tensor + eps_b + delta_a
    if session.rank == 0:
        delta_eps = op(eps, delta, **kwargs)
        share_tensor += delta_eps

    # Convert to our tensor type
    share_tensor = share_tensor.type(session.tensor_type)

    share = ShareTensor(session_uuid=UUID(session_uuid_str),
                        config=session.config)
    share.tensor = share_tensor

    # Ideally this should stay in the MPCTensor
    # Step 1. Do spdz_mul
    # Step 2. Divide by scale
    # This is done here to reduce one round of communication
    if session.nr_parties == 2:
        share.tensor //= share.fp_encoder.scale

    return share
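
A plain-integer sketch (no fixed-point encoding, not the SyMPC API) of why only rank 0 adds eps * delta: the public correction term must appear exactly once in the reconstructed sum share0 + share1:

import operator
import torch

op = operator.mul
x, y = torch.tensor([3]), torch.tensor([5])

a, b = torch.tensor([7]), torch.tensor([11])                 # Beaver triple with c = a * b
c = op(a, b)
a0, b0, c0 = torch.tensor([2]), torch.tensor([4]), torch.tensor([30])
a1, b1, c1 = a - a0, b - b0, c - c0                          # additive shares of the triple

eps, delta = x - a, y - b                                    # opened by both parties

share0 = c0 + op(eps, b0) + op(a0, delta) + op(eps, delta)   # rank 0 adds the public term
share1 = c1 + op(eps, b1) + op(a1, delta)                    # rank 1 does not

assert torch.equal(share0 + share1, op(x, y))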
Example #7
def mul_parties(
    session: Session,
    eps: torch.Tensor,
    delta: torch.Tensor,
    op_str: str,
) -> ShareTensor:
    """
    [c] = [a * b]
    [eps] = [x] - [a]
    [delta] = [y] - [b]

    Open eps and delta
    [result] = [c] + eps * [b] + delta * [a] + eps * delta

    :return: the ShareTensor for the multiplication
    :rtype: ShareTensor (in our case ShareTensorPointer)
    """

    crypto_store = session.crypto_store
    eps_shape = tuple(eps.shape)
    delta_shape = tuple(delta.shape)

    primitives = crypto_store.get_primitives_from_store(
        f"beaver_{op_str}", eps_shape, delta_shape)

    a_share, b_share, c_share = primitives
    op = getattr(operator, op_str)

    eps_b = op(eps, b_share.tensor)
    delta_a = op(a_share.tensor, delta)

    share_tensor = c_share.tensor + eps_b + delta_a
    if session.rank == 0:
        delta_eps = op(eps, delta)
        share_tensor += delta_eps

    # Convert to our tensor type
    share_tensor = share_tensor.type(session.tensor_type)

    share = ShareTensor(session=session)
    share.tensor = share_tensor

    # Ideally this should stay in the MPCTensor
    # Step 1. Do spdz_mul
    # Step 2. Divide by scale
    # This is done here to reduce one round of communication
    if session.nr_parties == 2:
        share.tensor //= share.fp_encoder.scale

    return share
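
A quick algebraic check of the reconstruction formula from the docstring, substituting eps = x - a and delta = y - b:

    c + eps * b + delta * a + eps * delta
      = a*b + (x - a)*b + (y - b)*a + (x - a)*(y - b)
      = a*b + x*b - a*b + y*a - a*b + x*y - x*b - a*y + a*b
      = x*y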
Example #8
File: spdz.py Project: NiWaRe/SyMPC
def div_wraps(
    r_share: ShareTensor,
    theta_r: ShareTensor,
    x_share: ShareTensor,
    z_shares: List[torch.Tensor],
    y: Union[torch.Tensor, int],
) -> ShareTensor:
    """From CrypTen Privately computes the number of wraparounds for a set a shares.

    To do so, we note that:
        [theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]

    Where:
        [theta_x] is the wraps for a variable x
        [beta_xr] is the differential wraps for variables x and r
        [eta_xr]  is the plaintext wraps for variables x and r

    Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
    can make the assumption that [eta_xr] = 0 with high probability.

    Args:
        r_share (ShareTensor): share for a random variable "r"
        theta_r (ShareTensor): share for the number of wraparounds for "r"
        x_share (ShareTensor): shares for which we want to compute the number of wraparounds
        z_shares (List[torch.Tensor]): list of shares for a random value
        y (Union[torch.Tensor, int]): the number/tensor by which we divide

    Returns:
        ShareTensor representing the number of wraparounds
    """
    session = get_session(str(r_share.session_uuid))

    beta_xr = count_wraps([x_share.tensor, r_share.tensor])
    theta_x = ShareTensor(config=Config(encoder_precision=0))
    theta_x.tensor = beta_xr - theta_r.tensor

    if session.rank == 0:
        theta_z = count_wraps(z_shares)
        theta_x.tensor += theta_z

    x_share.tensor //= y

    return theta_x
Example #9
def mul_parties(
    session: Session,
    a_share: ShareTensor,
    b_share: ShareTensor,
    c_share: ShareTensor,
    eps: torch.Tensor,
    delta: torch.Tensor,
    op_str: str,
) -> ShareTensor:
    """
    [c] = [a * b]
    [eps] = [x] - [a]
    [delta] = [y] - [b]

    Open eps and delta
    [result] = [c] + eps * [b] + delta * [a] + eps * delta

    :return: the ShareTensor for the multiplication
    :rtype: ShareTensor (in our case ShareTensorPointer)
    """

    op = getattr(operator, op_str)

    eps_b = op(eps, b_share.tensor)
    delta_a = op(a_share.tensor, delta)

    share_tensor = c_share.tensor + eps_b + delta_a
    if session.rank == 0:
        delta_eps = op(eps, delta)
        share_tensor += delta_eps

    scale = session.config.encoder_base ** session.config.encoder_precision

    share_tensor //= scale

    # Convert to our tensor type
    share_tensor = share_tensor.type(session.tensor_type)

    share = ShareTensor(session=session)
    share.tensor = share_tensor

    return share
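
A minimal fixed-point sketch of why the product is divided by the encoder scale (assuming base 2 and precision 16, with no secret sharing): each encoded operand carries one factor of the scale, so their product carries scale ** 2 and one factor has to be removed:

scale = 2 ** 16                      # encoder_base ** encoder_precision
x, y = 1.5, -2.25
x_enc, y_enc = int(x * scale), int(y * scale)

prod_enc = x_enc * y_enc             # encodes x * y with an extra factor of scale
prod_enc //= scale                   # drop one factor, as the code above does

print(prod_enc / scale)              # -3.375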
Example #10
    def generate_shares(secret, session: Session) -> List[ShareTensor]:
        """Given a secret generate, split it into a number of shares such that
        each party would get one

        :return: list of shares
        :rtype: List of Zero Shares
        """
        if not isinstance(secret, ShareTensor):
            raise ValueError("Secret should be a ShareTensor")

        parties: List[Any] = session.parties
        nr_parties = len(parties)

        min_value = session.min_value
        max_value = session.max_value

        shape = secret.shape
        tensor_type = session.tensor_type

        random_shares = []
        generator = csprng.create_random_device_generator()

        for _ in range(nr_parties - 1):
            rand_value = torch.empty(
                size=shape, dtype=torch.long).random_(generator=generator)
            share = ShareTensor(session=session)

            # Add the share after such that we do not encode it
            share.tensor = rand_value
            random_shares.append(share)

        shares = []
        for i in range(len(parties)):
            if i == 0:
                share = random_shares[i]
            elif i < nr_parties - 1:
                share = random_shares[i] - random_shares[i - 1]
            else:
                share = secret - random_shares[i - 1]

            shares.append(share)
        return shares
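
A plain-torch sketch (not the SyMPC API) of the additive scheme implemented above, for three parties: n - 1 random values plus a final "secret minus last random value" share reconstruct the secret exactly:

import torch

secret = torch.tensor([10, -7, 3], dtype=torch.long)

r = [torch.randint(-1000, 1000, secret.shape) for _ in range(2)]
shares = [r[0], r[1] - r[0], secret - r[1]]   # telescoping sum

assert torch.equal(sum(shares), secret)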
Example #11
def proto2object(proto: ShareTensor_PB) -> ShareTensor:
    if proto.session_uuid:
        session = sympc.session.get_session(proto.session_uuid)
        if session is None:
            raise ValueError(
                f"The session {proto.session_uuid} could not be found")

        config = dataclasses.asdict(session.config)
    else:
        config = syft.deserialize(proto.config, from_proto=True)

    data = protobuf_tensor_deserializer(proto.tensor.tensor)
    share = ShareTensor(data=None, config=Config(**config))

    if proto.session_uuid:
        share.session_uuid = UUID(proto.session_uuid)

    # Manually put the tensor since we do not want to re-encode it
    share.tensor = data

    return share
Example #12
    def generate_shares(
        secret: Union[ShareTensor, torch.Tensor, float, int],
        nr_parties: int,
        config: Config = Config(),
        tensor_type: Optional[torch.dtype] = None,
    ) -> List[ShareTensor]:
        """Generate shares from secret.

        Given a secret, split it into a number of shares such that each
        party would get one.

        Args:
            secret (Union[ShareTensor, torch.Tensor, float, int]): Secret to split.
            nr_parties (int): Number of parties to split the secret.
            config (Config): Configuration used for the Share Tensor (in case it is needed).
                Use default Config if nothing provided. The ShareTensor config would have priority.
            tensor_type (torch.dtype, optional): tensor type. Defaults to None.

        Returns:
            List[ShareTensor]. List of ShareTensor.

        Raises:
            ValueError: If secret is not in an expected format.

        Examples:
            >>> from sympc.tensor.mpc_tensor import MPCTensor
            >>> MPCTensor.generate_shares(secret=2, nr_parties=2)
            [[ShareTensor]
                | [FixedPointEncoder]: precision: 16, base: 2
                | Data: tensor([15511500.]), [ShareTensor]
                | [FixedPointEncoder]: precision: 16, base: 2
                | Data: tensor([-15380428.])]
            >>> MPCTensor.generate_shares(secret=2, nr_parties=2,
                encoder_base=3, encoder_precision=4)
            [[ShareTensor]
                | [FixedPointEncoder]: precision: 4, base: 3
                | Data: tensor([14933283.]), [ShareTensor]
                | [FixedPointEncoder]: precision: 4, base: 3
                | Data: tensor([-14933121.])]
        """
        if isinstance(secret, (torch.Tensor, float, int)):
            # if secret is not a ShareTensor, a new instance is created
            secret = ShareTensor(secret, config=config)
        else:
            config = secret.config

        if not isinstance(secret, ShareTensor):
            raise ValueError(
                "Secret should be a ShareTensor, torchTensor, float or int.")

        op = operator.sub
        shape = secret.shape

        random_shares = []
        generator = csprng.create_random_device_generator()

        for _ in range(nr_parties - 1):
            rand_value = torch.empty(
                size=shape, dtype=tensor_type).random_(generator=generator)
            share = ShareTensor(data=rand_value, config=config)
            share.tensor = rand_value

            random_shares.append(share)

        shares = []
        for i in range(nr_parties):
            if i == 0:
                share = random_shares[i]
            elif i < nr_parties - 1:
                share = op(random_shares[i], random_shares[i - 1])
            else:
                share = op(secret, random_shares[i - 1])

            shares.append(share)
        return shares
Example #13
def helper_max_pool2d_reshape(
    x: ShareTensor,
    kernel_size: Tuple[int, int],
    stride: Tuple[int, int],
    padding: Tuple[int, int],
    dilation: Tuple[int, int],
) -> ShareTensor:
    """Function that runs at each party for preparing the share.

    Reshape each share tensor to prepare it for calling 'argmax'.
    In the new share, "each element" is one window on which the
    max_pool2d kernel will be run.

    Args:
        x (ShareTensor): the ShareTensor on which to apply the reshaping
        kernel_size (Tuple[int, int]): the kernel size
        stride (Tuple[int, int]): the stride size
        padding (Tuple[int, int]): the padding size
        dilation (Tuple[int, int]): the dilation size

    Returns:
        The prepared share tensor (reshaped)
    """
    session = get_session(x.session_uuid)
    tensor = x.tensor.numpy()

    padding = [(0, 0)] * len(tensor.shape[:-2]) + [
        (padding[0], padding[0]),
        (padding[1], padding[1]),
    ]
    tensor_type = session.tensor_type

    padding_value = 0
    if session.rank == 0:
        # ATTENTION: -25 is the smallest padding value for which max_pool2d works correctly
        padding_value = -25

    tensor = np.pad(tensor,
                    padding,
                    mode="constant",
                    constant_values=padding_value)

    output_shape = tensor.shape[:-2]
    output_shape += (
        (tensor.shape[-2] - kernel_size[0]) // stride[0] + 1,
        (tensor.shape[-1] - kernel_size[1]) // stride[1] + 1,
    )
    output_shape += kernel_size

    output_strides = tensor.strides[:-2]
    output_strides += (stride[0] * tensor.strides[-2],
                       stride[1] * tensor.strides[-1])
    output_strides += tensor.strides[-2:]

    window_view_share = torch.tensor(
        np.lib.stride_tricks.as_strided(tensor,
                                        shape=output_shape,
                                        strides=output_strides),
        dtype=tensor_type,
    )

    window_view_share = window_view_share.reshape(-1, *kernel_size)

    res_share = ShareTensor(config=x.config)
    res_share.tensor = window_view_share
    return res_share
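
A small numpy/torch sketch (stride 1, no padding, no secret sharing) of the sliding-window trick used above: as_strided exposes each kernel window, so taking the max over the window axes matches torch.nn.functional.max_pool2d on clear data:

import numpy as np
import torch
import torch.nn.functional as F

x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
k = (2, 2)
a = x.numpy()

out_shape = a.shape[:-2] + (a.shape[-2] - k[0] + 1, a.shape[-1] - k[1] + 1) + k
out_strides = a.strides[:-2] + a.strides[-2:] + a.strides[-2:]
windows = np.lib.stride_tricks.as_strided(a, shape=out_shape, strides=out_strides)

ours = torch.from_numpy(windows.max(axis=(-2, -1)))
ref = F.max_pool2d(x, kernel_size=k, stride=1)
assert torch.equal(ours, ref)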
Example #14
    def generate_shares(
        secret: Union[ShareTensor, torch.Tensor, float, int],
        nr_parties: int,
        tensor_type: Optional[torch.dtype] = None,
        **kwargs,
    ) -> List[ShareTensor]:
        """Given a secret, split it into a number of shares such that each
        party would get one.

        Args:
            secret (Union[ShareTensor, torch.Tensor, float, int]): secret to split
            nr_parties (int): number of parties to split the secret
            tensor_type (torch.dtype, optional): tensor type. Defaults to None.
            **kwargs: keyword arguments passed to ShareTensor

        Returns:
            List[ShareTensor]. List of ShareTensor

        Examples:
            >>> from sympc.tensor.mpc_tensor import MPCTensor
            >>> MPCTensor.generate_shares(secret=2, nr_parties=2)
            [[ShareTensor]
                | [FixedPointEncoder]: precision: 16, base: 2
                | Data: tensor([15511500.]), [ShareTensor]
                | [FixedPointEncoder]: precision: 16, base: 2
                | Data: tensor([-15380428.])]
            >>> MPCTensor.generate_shares(secret=2, nr_parties=2,
                encoder_base=3, encoder_precision=4)
            [[ShareTensor]
                | [FixedPointEncoder]: precision: 4, base: 3
                | Data: tensor([14933283.]), [ShareTensor]
                | [FixedPointEncoder]: precision: 4, base: 3
                | Data: tensor([-14933121.])]
        """

        if isinstance(secret, (torch.Tensor, float, int)):
            secret = ShareTensor(secret, **kwargs)

        # at this point the secret must already be a ShareTensor
        if not isinstance(secret, ShareTensor):
            raise ValueError(
                "Secret should be a ShareTensor, torchTensor, float or int.")

        shape = secret.shape

        random_shares = []
        generator = csprng.create_random_device_generator()

        for _ in range(nr_parties - 1):
            rand_value = torch.empty(
                size=shape, dtype=tensor_type).random_(generator=generator)
            share = ShareTensor(session=secret.session)
            share.tensor = rand_value

            random_shares.append(share)

        shares = []
        for i in range(nr_parties):
            if i == 0:
                share = random_shares[i]
            elif i < nr_parties - 1:
                share = random_shares[i] - random_shares[i - 1]
            else:
                share = secret - random_shares[i - 1]

            shares.append(share)
        return shares