def div_wraps(
    session: Session,
    r_share: ShareTensor,
    theta_r: ShareTensor,
    x_share: ShareTensor,
    z_shares: List[torch.Tensor],
    y: Union[torch.Tensor, int],
) -> ShareTensor:
    """From CrypTen
    Privately computes the number of wraparounds for a set a shares

    To do so, we note that:
        [theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]

    Where:
        [theta_x] is the wraps for a variable x
        [beta_xr] is the differential wraps for variables x and r
        [eta_xr] is the plaintext wraps for variables x and r

    Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
    can make the assumption that [eta_xr] = 0 with high probability.

    Args:
        session (Session): the session this party belongs to (only rank 0
            folds in the wraps of the z shares)
        r_share (ShareTensor): share for a random variable "r"
        theta_r (ShareTensor): share for the number of wraparounds for "r"
        x_share (ShareTensor): share for which we want the wrap count;
            NOTE: its tensor is floor-divided by y in place as a side effect
        z_shares (List[torch.Tensor]): list of shares for a random value
        y (Union[torch.Tensor, int]): the number/tensor by which we divide

    Returns:
        ShareTensor: this party's share of the number of wraparounds
    """
    # NOTE(review): a second, newer definition of div_wraps appears later in
    # this file (it derives the session from r_share.session_uuid and wraps
    # encoder_precision in a Config). In Python the later definition shadows
    # this one — confirm which version is intended to survive.

    # Differential wraps between this party's x and r shares.
    beta_xr = count_wraps([x_share.tensor, r_share.tensor])
    theta_x = ShareTensor(encoder_precision=0)
    theta_x.tensor = beta_xr - theta_r.tensor

    # Only rank 0 adds theta_z so it is counted exactly once across parties.
    if session.rank == 0:
        theta_z = count_wraps(z_shares)
        theta_x.tensor += theta_z

    # Public division applied to this party's share, in place.
    x_share.tensor //= y

    return theta_x
def div_wraps(
    r_share: ShareTensor,
    theta_r: ShareTensor,
    x_share: ShareTensor,
    z_shares: List[torch.Tensor],
    y: Union[torch.Tensor, int],
) -> ShareTensor:
    """From CrypTen
    Privately computes the number of wraparounds for a set a shares.

    The computation relies on the identity:

        [theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]

    Where:
        [theta_x] is the wraps for a variable x
        [beta_xr] is the differential wraps for variables x and r
        [eta_xr] is the plaintext wraps for variables x and r

    Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
    can make the assumption that [eta_xr] = 0 with high probability.

    Args:
        r_share (ShareTensor): share for a random variable "r"
        theta_r (ShareTensor): share for the number of wraparounds for "r"
        x_share (ShareTensor): shares for which we want to compute the number
            of wraparounds; its tensor is floor-divided by y in place
        z_shares (List[torch.Tensor]): list of shares for a random value
        y (Union[torch.Tensor, int]): the number/tensor by which we divide

    Returns:
        ShareTensor representing the number of wraparounds
    """
    session = get_session(str(r_share.session_uuid))

    # Differential wraps between this party's x and r shares. This must be
    # read before x_share.tensor is divided below.
    diff_wraps = count_wraps([x_share.tensor, r_share.tensor])

    result = ShareTensor(config=Config(encoder_precision=0))
    result.tensor = diff_wraps - theta_r.tensor

    # Only the party with rank 0 folds in theta_z, so it is counted once.
    if session.rank == 0:
        result.tensor = result.tensor + count_wraps(z_shares)

    # Apply the public division to this party's share, in place.
    x_share.tensor //= y

    return result
def count_wraps_rand(
    nr_parties: int, shape: Tuple[int]
) -> Tuple[List[ShareTensor], List[ShareTensor]]:
    """Generate the wraparound-correction material for a public division.

    The Trusted Third Party (TTP) or Crypto Provider should generate:
      - a set of shares for a random number
      - a set of shares for the number of wraparounds for that number

    Those shares are used when doing a public division, such that the end
    result would be the correct one.

    Args:
        nr_parties (int): how many parties the shares are split between
        shape (Tuple[int]): shape of the random tensor to generate

    Returns:
        Tuple[List[ShareTensor], List[ShareTensor]]: the shares of the random
        value "r" and the shares of its wraparound count "theta_r"
    """
    random_tensor = torch.empty(size=shape, dtype=torch.long).random_(
        generator=ttp_generator
    )

    r = ShareTensor(data=random_tensor, encoder_precision=0)
    r_shares = MPCTensor.generate_shares(r, nr_parties, torch.long)

    # theta_r counts how often the sum of the generated shares of r wraps.
    theta_r = ShareTensor(
        data=count_wraps([share.data for share in r_shares]),
        encoder_precision=0,
    )
    theta_r_shares = MPCTensor.generate_shares(theta_r, nr_parties, torch.long)

    return r_shares, theta_r_shares