def from_laplace_mechanism(
    cls,
    parameter: float,
    sensitivity: float = 1,
    pessimistic_estimate: bool = True,
    value_discretization_interval: float = 1e-4
) -> 'PrivacyLossDistribution':
    """Creates the privacy loss distribution of the Laplace mechanism.

    Args:
        parameter: the parameter of the Laplace distribution.
        sensitivity: the sensitivity of function f. (i.e. the maximum absolute
            change in f when an input to a single user changes.)
        pessimistic_estimate: a value indicating whether the rounding is done
            in such a way that the resulting epsilon-hockey stick divergence
            computation gives an upper estimate to the real value.
        value_discretization_interval: the length of the discretization
            interval for the privacy loss distribution. The values will be
            rounded up/down to be integer multiples of this number.

    Returns:
        The privacy loss distribution corresponding to the Laplace mechanism
        with given parameters.
    """
    # Build the additive-noise privacy loss first, then wrap it in a PLD.
    laplace_loss = privacy_loss_mechanism.LaplacePrivacyLoss(
        parameter, sensitivity=sensitivity)
    return PrivacyLossDistribution.create_from_additive_noise(
        laplace_loss,
        pessimistic_estimate=pessimistic_estimate,
        value_discretization_interval=value_discretization_interval)
def test_laplace_privacy_loss(self, parameter, sensitivity, sampling_prob,
                              adjacency_type, x, expected_privacy_loss):
    """Checks privacy_loss at a point for a subsampled Laplace mechanism."""
    mechanism = privacy_loss_mechanism.LaplacePrivacyLoss(
        parameter,
        sensitivity=sensitivity,
        sampling_prob=sampling_prob,
        adjacency_type=adjacency_type)
    actual_privacy_loss = mechanism.privacy_loss(x)
    self.assertAlmostEqual(expected_privacy_loss, actual_privacy_loss)
def test_laplace_get_delta_for_epsilon(self, parameter, sensitivity,
                                       sampling_prob, adjacency_type, epsilon,
                                       expected_delta):
    """Checks get_delta_for_epsilon for a subsampled Laplace mechanism."""
    mechanism = privacy_loss_mechanism.LaplacePrivacyLoss(
        parameter,
        sensitivity=sensitivity,
        sampling_prob=sampling_prob,
        adjacency_type=adjacency_type)
    actual_delta = mechanism.get_delta_for_epsilon(epsilon)
    self.assertAlmostEqual(expected_delta, actual_delta)
def test_laplace_value_errors(self, parameter, sensitivity,
                              sampling_prob=1.0, adjacency_type=ADD):
    """Checks that invalid constructor arguments raise ValueError."""
    with self.assertRaises(ValueError):
        privacy_loss_mechanism.LaplacePrivacyLoss(
            parameter,
            sensitivity=sensitivity,
            sampling_prob=sampling_prob,
            adjacency_type=adjacency_type)
def test_laplace_privacy_loss_tail(self, parameter, sensitivity,
                                   expected_lower_x_truncation,
                                   expected_upper_x_truncation,
                                   expected_tail_probability_mass_function):
    """Checks the truncation bounds and tail pmf of privacy_loss_tail."""
    mechanism = privacy_loss_mechanism.LaplacePrivacyLoss(
        parameter, sensitivity=sensitivity)
    tail = mechanism.privacy_loss_tail()
    # Truncation endpoints are plain floats; the tail pmf is a dict compared
    # entry-wise via the shared test utility.
    self.assertAlmostEqual(expected_lower_x_truncation,
                           tail.lower_x_truncation)
    self.assertAlmostEqual(expected_upper_x_truncation,
                           tail.upper_x_truncation)
    test_util.dictionary_almost_equal(
        self, expected_tail_probability_mass_function,
        tail.tail_probability_mass_function)
def test_laplace_get_delta_for_epsilon(
        self, parameter, sensitivity, epsilon, expected_delta):
    """Checks get_delta_for_epsilon of the (unsampled) Laplace mechanism."""
    mechanism = privacy_loss_mechanism.LaplacePrivacyLoss(
        parameter, sensitivity=sensitivity)
    actual_delta = mechanism.get_delta_for_epsilon(epsilon)
    self.assertAlmostEqual(expected_delta, actual_delta)
def test_laplace_inverse_privacy_loss(self, parameter, sensitivity,
                                      privacy_loss, expected_x):
    """Checks inverse_privacy_loss of the (unsampled) Laplace mechanism."""
    mechanism = privacy_loss_mechanism.LaplacePrivacyLoss(
        parameter, sensitivity=sensitivity)
    actual_x = mechanism.inverse_privacy_loss(privacy_loss)
    self.assertAlmostEqual(expected_x, actual_x)
def test_laplace_privacy_loss(self, parameter, sensitivity, x,
                              expected_privacy_loss):
    """Checks privacy_loss at a point for the (unsampled) Laplace mechanism."""
    mechanism = privacy_loss_mechanism.LaplacePrivacyLoss(
        parameter, sensitivity=sensitivity)
    actual_privacy_loss = mechanism.privacy_loss(x)
    self.assertAlmostEqual(expected_privacy_loss, actual_privacy_loss)
def test_laplace_value_errors(self, parameter, sensitivity):
    """Checks that invalid constructor arguments raise ValueError."""
    with self.assertRaises(ValueError):
        privacy_loss_mechanism.LaplacePrivacyLoss(
            parameter, sensitivity=sensitivity)