def test_gaussian_self_composition(self, standard_deviation, sensitivity,
                                   num_times, expected_standard_deviation,
                                   expected_sensitivity):
  """Checks parameters of a Gaussian PLD after composing it with itself.

  Self-composing a Gaussian PLD num_times is expected to yield another
  Gaussian PLD whose standard deviation and sensitivity match the
  provided expected values.
  """
  base_pld = privacy_loss_distribution.GaussianPrivacyLossDistribution(
      standard_deviation,
      sensitivity=sensitivity,
      value_discretization_interval=1)
  result = base_pld.self_compose(num_times)
  # NOTE(review): private attributes are inspected directly, which is
  # acceptable in this white-box test of the same package.
  self.assertAlmostEqual(expected_standard_deviation,
                         result._standard_deviation)
  self.assertAlmostEqual(expected_sensitivity, result._sensitivity)
def test_gaussian_discretization(self, value_discretization_interval,
                                 expected_rounded_probability_mass_function):
  """Verifies the discretized PMF of a unit Gaussian PLD.

  The truncation bound is chosen so that the mass truncated on each side
  equals stats.norm.cdf(-1) in total; that mass must land on infinity_mass.
  """
  truncation_bound = math.log(2) + stats.norm.logcdf(-1)
  pld = privacy_loss_distribution.GaussianPrivacyLossDistribution(
      1,
      value_discretization_interval=value_discretization_interval,
      log_mass_truncation_bound=truncation_bound)
  self.assertAlmostEqual(stats.norm.cdf(-1), pld.infinity_mass)
  dictionary_almost_equal(self, expected_rounded_probability_mass_function,
                          pld.rounded_probability_mass_function)
def test_gaussian_optimistic(self, sensitivity,
                             expected_rounded_probability_mass_function):
  """Checks the optimistic-estimate PMF of a unit Gaussian PLD.

  With pessimistic_estimate=False, truncated mass is discarded rather than
  pushed to infinity, so infinity_mass must be zero.
  """
  truncation_bound = math.log(2) + stats.norm.logcdf(-0.9)
  pld = privacy_loss_distribution.GaussianPrivacyLossDistribution(
      1,
      sensitivity=sensitivity,
      pessimistic_estimate=False,
      value_discretization_interval=1,
      log_mass_truncation_bound=truncation_bound)
  self.assertAlmostEqual(0, pld.infinity_mass)
  dictionary_almost_equal(self, expected_rounded_probability_mass_function,
                          pld.rounded_probability_mass_function)
def test_gaussian_varying_standard_deviation_and_sensitivity(
    self, standard_deviation, sensitivity,
    expected_rounded_probability_mass_function):
  """Checks the discretized PMF for varying std-dev / sensitivity pairs.

  With a pessimistic estimate (the default), the truncated probability
  mass stats.norm.cdf(-0.9) must appear as infinity_mass.
  """
  truncation_bound = math.log(2) + stats.norm.logcdf(-0.9)
  pld = privacy_loss_distribution.GaussianPrivacyLossDistribution(
      standard_deviation,
      sensitivity=sensitivity,
      value_discretization_interval=1,
      log_mass_truncation_bound=truncation_bound)
  self.assertAlmostEqual(stats.norm.cdf(-0.9), pld.infinity_mass)
  dictionary_almost_equal(self, expected_rounded_probability_mass_function,
                          pld.rounded_probability_mass_function)
def test_gaussian_privacy_loss_tail(
    self, standard_deviation, sensitivity, expected_lower_x_truncation,
    expected_upper_x_truncation, pessimistic_estimate,
    expected_tail_probability_mass_function):
  """Checks the tail object returned by privacy_loss_tail().

  Both the x-truncation endpoints and the tail probability mass function
  are compared against expectations.
  """
  truncation_bound = math.log(2) + stats.norm.logcdf(-1)
  pld = privacy_loss_distribution.GaussianPrivacyLossDistribution(
      standard_deviation,
      sensitivity=sensitivity,
      value_discretization_interval=1,
      pessimistic_estimate=pessimistic_estimate,
      log_mass_truncation_bound=truncation_bound)
  tail = pld.privacy_loss_tail()
  self.assertAlmostEqual(expected_lower_x_truncation,
                         tail.lower_x_truncation)
  self.assertAlmostEqual(expected_upper_x_truncation,
                         tail.upper_x_truncation)
  dictionary_almost_equal(self, expected_tail_probability_mass_function,
                          tail.tail_probability_mass_function)
def main(argv):
  """Example: composing privacy loss distributions (PLDs).

  Demonstrates (a) self-composing the PLD of the Laplace Mechanism and
  (b) composing PLDs of two different mechanisms, then reporting the
  resulting (epsilon, delta)-DP guarantees via the hockey-stick divergence.

  Args:
    argv: command-line arguments; none are expected beyond the program name.

  Raises:
    app.UsageError: if extra command-line arguments are supplied.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  # The parameter of Laplace Noise added
  parameter_laplace = 3
  # Number of times Laplace Mechanism is run
  num_laplace = 40
  epsilon = 10

  # PLD for one execution of the Laplace Mechanism. (Throughout we assume that
  # sensitivity = 1.)
  laplace_pld = privacy_loss_distribution.LaplacePrivacyLossDistribution(
      parameter_laplace, value_discretization_interval=1e-3)

  # PLD for num_laplace executions of the Laplace Mechanism.
  composed_laplace_pld = laplace_pld.self_compose(num_laplace)
  delta = composed_laplace_pld.hockey_stick_divergence(epsilon)
  print(f'An algorithm that executes the Laplace Mechanism with parameter '
        f'{parameter_laplace} for a total of {num_laplace} times is '
        f'({epsilon}, {delta})-DP.')

  # PLDs for different mechanisms can also be composed. Below is an example in
  # which we compose PLDs for Laplace Mechanism and Gaussian Mechanism.

  # STD of the Gaussian Noise
  standard_deviation = 5
  # PLD for an execution of the Gaussian Mechanism.
  gaussian_pld = privacy_loss_distribution.GaussianPrivacyLossDistribution(
      standard_deviation, value_discretization_interval=1e-3)

  # PLD for num_laplace executions of the Laplace Mechanism and one execution of
  # the Gaussian Mechanism.
  composed_laplace_and_gaussian_pld = composed_laplace_pld.compose(
      gaussian_pld)
  epsilon = 10
  delta = composed_laplace_and_gaussian_pld.hockey_stick_divergence(epsilon)
  print(f'An algorithm that executes the Laplace Mechanism with parameter '
        f'{parameter_laplace} for a total of {num_laplace} times and in '
        f'addition executes once the Gaussian Mechanism with STD '
        f'{standard_deviation} is ({epsilon}, {delta})-DP.')
def test_gaussian_value_errors(self, standard_deviation, sensitivity):
  """Invalid parameter combinations must raise ValueError at construction."""
  with self.assertRaises(ValueError):
    privacy_loss_distribution.GaussianPrivacyLossDistribution(
        standard_deviation, sensitivity=sensitivity)