def from_discrete_gaussian_mechanism(
    cls,
    sigma: float,
    sensitivity: int = 1,
    truncation_bound: typing.Optional[int] = None,
    pessimistic_estimate: bool = True,
    value_discretization_interval: float = 1e-4) -> 'PrivacyLossDistribution':
  """Computes the privacy loss distribution of the discrete Gaussian mechanism.

  Args:
    sigma: parameter of the discrete Gaussian noise distribution. Note that
      unlike the (continuous) Gaussian distribution this is not equal to the
      standard deviation of the noise.
    sensitivity: the sensitivity of function f, i.e. the maximum absolute
      change in f when an input to a single user changes.
    truncation_bound: bound for truncating the noise; the noise support is
      restricted to [-truncation_bound, truncation_bound]. When None, the
      bound is chosen so that the probability mass outside this range is at
      most 1e-30.
    pessimistic_estimate: whether rounding is done so that the resulting
      epsilon-hockey stick divergence computation gives an upper estimate of
      the real value.
    value_discretization_interval: length of the discretization interval for
      the privacy loss distribution; values are rounded up/down to integer
      multiples of this number.

  Returns:
    The privacy loss distribution of the discrete Gaussian mechanism with the
    given parameters.
  """
  # Build the per-noise privacy loss first, then discretize it into a PLD.
  noise_privacy_loss = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma, sensitivity=sensitivity, truncation_bound=truncation_bound)
  return PrivacyLossDistribution.create_from_additive_noise(
      noise_privacy_loss,
      pessimistic_estimate=pessimistic_estimate,
      value_discretization_interval=value_discretization_interval)
def test_discrete_gaussian_value_errors(self, sigma, sensitivity,
                                        truncation_bound=None):
  """Invalid constructor parameters must raise ValueError."""
  constructor_kwargs = dict(
      sensitivity=sensitivity, truncation_bound=truncation_bound)
  with self.assertRaises(ValueError):
    privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
        sigma, **constructor_kwargs)
def test_discrete_gaussian_privacy_loss_value_errors(
    self, sigma, sensitivity, sampling_prob, adjacency_type, x):
  """privacy_loss must raise ValueError for out-of-domain x."""
  mechanism = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma,
      sensitivity=sensitivity,
      sampling_prob=sampling_prob,
      adjacency_type=adjacency_type)
  with self.assertRaises(ValueError):
    mechanism.privacy_loss(x)
def test_discrete_gaussian_privacy_loss(self, sigma, sensitivity,
                                        sampling_prob, adjacency_type, x,
                                        expected_privacy_loss):
  """Checks privacy_loss(x) against its expected value."""
  mechanism = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma,
      sensitivity=sensitivity,
      sampling_prob=sampling_prob,
      adjacency_type=adjacency_type)
  actual_privacy_loss = mechanism.privacy_loss(x)
  self.assertAlmostEqual(expected_privacy_loss, actual_privacy_loss)
def test_discrete_gaussian_value_errors(self, sigma, sensitivity,
                                        sampling_prob=1.0,
                                        adjacency_type=ADD,
                                        truncation_bound=None):
  """Invalid constructor parameters must raise ValueError."""
  constructor_kwargs = dict(
      sensitivity=sensitivity,
      truncation_bound=truncation_bound,
      sampling_prob=sampling_prob,
      adjacency_type=adjacency_type)
  with self.assertRaises(ValueError):
    privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
        sigma, **constructor_kwargs)
def test_discrete_gaussian_privacy_loss_tail(
    self, sigma, sensitivity, truncation_bound,
    expected_lower_x_truncation, expected_upper_x_truncation,
    expected_tail_probability_mass_function):
  """Checks the truncation points and tail PMF of privacy_loss_tail()."""
  mechanism = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma, sensitivity=sensitivity, truncation_bound=truncation_bound)
  result = mechanism.privacy_loss_tail()
  self.assertAlmostEqual(expected_lower_x_truncation,
                         result.lower_x_truncation)
  self.assertAlmostEqual(expected_upper_x_truncation,
                         result.upper_x_truncation)
  test_util.dictionary_almost_equal(
      self, expected_tail_probability_mass_function,
      result.tail_probability_mass_function)
def test_discrete_gaussian_std(self, sigma, sensitivity, truncation_bound,
                               expected_std):
  """Checks standard_deviation() against its expected value."""
  mechanism = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma, sensitivity=sensitivity, truncation_bound=truncation_bound)
  actual_std = mechanism.standard_deviation()
  self.assertAlmostEqual(expected_std, actual_std)
def test_discrete_gaussian_noise_cdf(self, sigma, sensitivity,
                                     truncation_bound, x_to_cdf_value):
  """Checks noise_cdf against expected values at several points."""
  mechanism = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma, sensitivity=sensitivity, truncation_bound=truncation_bound)
  for x, expected_cdf in x_to_cdf_value.items():
    self.assertAlmostEqual(expected_cdf, mechanism.noise_cdf(x))
def test_discrete_gaussian_inverse_privacy_loss(self, sigma, sensitivity,
                                                privacy_loss, expected_x):
  """Checks inverse_privacy_loss against its expected value."""
  mechanism = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma, sensitivity=sensitivity)
  actual_x = mechanism.inverse_privacy_loss(privacy_loss)
  self.assertAlmostEqual(expected_x, actual_x)
def test_discrete_gaussian_privacy_loss(self, sigma, sensitivity, x,
                                        expected_privacy_loss):
  """Checks privacy_loss(x) against its expected value."""
  mechanism = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma, sensitivity=sensitivity)
  actual_privacy_loss = mechanism.privacy_loss(x)
  self.assertAlmostEqual(expected_privacy_loss, actual_privacy_loss)
def test_discrete_gaussian_privacy_loss_value_errors(
    self, sigma, sensitivity, x):
  """privacy_loss must raise ValueError for out-of-domain x."""
  mechanism = privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(
      sigma, sensitivity=sensitivity)
  with self.assertRaises(ValueError):
    mechanism.privacy_loss(x)