def test_laplace_privacy_loss(self, parameter, sensitivity, x,
                              expected_privacy_loss):
    """Checks privacy_loss(x) of a Laplace PLD against the expected value."""
    laplace_pld = privacy_loss_distribution.LaplacePrivacyLossDistribution(
        parameter, sensitivity=sensitivity, value_discretization_interval=1)
    actual_loss = laplace_pld.privacy_loss(x)
    self.assertAlmostEqual(expected_privacy_loss, actual_loss)
 def test_laplace_get_delta_for_epsilon(self, parameter, sensitivity,
                                        epsilon, expected_divergence):
     """Checks get_delta_for_epsilon of a Laplace PLD against the expected value."""
     laplace_pld = privacy_loss_distribution.LaplacePrivacyLossDistribution(
         parameter, sensitivity=sensitivity, value_discretization_interval=1)
     delta = laplace_pld.get_delta_for_epsilon(epsilon)
     self.assertAlmostEqual(expected_divergence, delta)
 def test_laplace_discretization(
         self, value_discretization_interval,
         expected_rounded_probability_mass_function):
     """Checks the rounded PMF of a unit-parameter Laplace PLD."""
     laplace_pld = privacy_loss_distribution.LaplacePrivacyLossDistribution(
         1, value_discretization_interval=value_discretization_interval)
     dictionary_almost_equal(self, expected_rounded_probability_mass_function,
                             laplace_pld.rounded_probability_mass_function)
 def test_laplace_varying_parameter_and_sensitivity(
         self, parameter, sensitivity,
         expected_rounded_probability_mass_function):
     """Checks the rounded PMF as the Laplace parameter and sensitivity vary."""
     laplace_pld = privacy_loss_distribution.LaplacePrivacyLossDistribution(
         parameter, sensitivity=sensitivity, value_discretization_interval=1)
     dictionary_almost_equal(self, expected_rounded_probability_mass_function,
                             laplace_pld.rounded_probability_mass_function)
 def test_laplace_optimistic(self, sensitivity,
                             expected_rounded_probability_mass_function):
     """Checks the rounded PMF of a Laplace PLD with optimistic estimates."""
     laplace_pld = privacy_loss_distribution.LaplacePrivacyLossDistribution(
         1,
         sensitivity=sensitivity,
         pessimistic_estimate=False,
         value_discretization_interval=1)
     dictionary_almost_equal(self, expected_rounded_probability_mass_function,
                             laplace_pld.rounded_probability_mass_function)
 def test_laplace_privacy_loss_tail(self, parameter, sensitivity,
                                    expected_lower_x_truncation,
                                    expected_upper_x_truncation,
                                    expected_tail_probability_mass_function):
     """Checks the truncation bounds and tail PMF of a Laplace PLD."""
     laplace_pld = privacy_loss_distribution.LaplacePrivacyLossDistribution(
         parameter, sensitivity=sensitivity, value_discretization_interval=1)
     tail = laplace_pld.privacy_loss_tail()
     self.assertAlmostEqual(expected_lower_x_truncation,
                            tail.lower_x_truncation)
     self.assertAlmostEqual(expected_upper_x_truncation,
                            tail.upper_x_truncation)
     dictionary_almost_equal(self, expected_tail_probability_mass_function,
                             tail.tail_probability_mass_function)
# Beispiel #7  (scraper artifact: example marker and vote count — not code)
# 0
def main(argv):
    """Demonstrates composing privacy loss distributions (PLDs).

    Builds the PLD of the Laplace mechanism, self-composes it 40 times,
    additionally composes it with the PLD of one Gaussian mechanism run,
    and prints the resulting (epsilon, delta)-DP guarantees.

    Args:
      argv: Command-line arguments; only the program name is accepted.

    Raises:
      app.UsageError: If any extra command-line argument is supplied.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    # Noise parameter of the Laplace mechanism; throughout we assume that
    # sensitivity = 1.
    parameter_laplace = 3
    laplace_pld = privacy_loss_distribution.LaplacePrivacyLossDistribution(
        parameter_laplace, value_discretization_interval=1e-3)

    # Compose the Laplace mechanism's PLD with itself for the total number
    # of executions.
    num_laplace = 40
    composed_laplace_pld = laplace_pld.self_compose(num_laplace)

    epsilon = 10
    delta = composed_laplace_pld.hockey_stick_divergence(epsilon)
    print(f'An algorithm that executes the Laplace Mechanism with parameter '
          f'{parameter_laplace} for a total of {num_laplace} times is '
          f'({epsilon}, {delta})-DP.')

    # PLDs of different mechanisms compose as well: add a single execution
    # of the Gaussian mechanism with the given standard deviation.
    standard_deviation = 5
    gaussian_pld = privacy_loss_distribution.GaussianPrivacyLossDistribution(
        standard_deviation, value_discretization_interval=1e-3)
    composed_laplace_and_gaussian_pld = composed_laplace_pld.compose(
        gaussian_pld)

    epsilon = 10
    delta = composed_laplace_and_gaussian_pld.hockey_stick_divergence(epsilon)
    print(f'An algorithm that executes the Laplace Mechanism with parameter '
          f'{parameter_laplace} for a total of {num_laplace} times and in '
          f'addition executes once the Gaussian Mechanism with STD '
          f'{standard_deviation} is ({epsilon}, {delta})-DP.')