Example #1
0
    def test_fail_on_inconsistent_shape(self):
        """Appending a value whose shape differs from the buffer's row shape fails.

        The buffer is built for rows of shape [2, 3]; appending a [3, 4]
        tensor must raise InvalidArgumentError.
        """
        size, shape = 1, [2, 3]

        my_buffer = tensor_buffer.TensorBuffer(size, shape, name='my_buffer')

        # assertRaisesRegexp was removed in Python 3.12; assertRaisesRegex is
        # the supported spelling and behaves identically.
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Appending value of inconsistent shape.'):
            my_buffer.append(tf.ones(shape=[3, 4], dtype=tf.int32))
Example #2
0
    def __init__(self, population_size, selection_probability):
        """Initializes the PrivacyLedger.

    Args:
      population_size: An integer (may be variable) specifying the size of the
        population, i.e. size of the training data used in each epoch.
      selection_probability: A float (may be variable) specifying the
        probability each record is included in a sample.

    Raises:
      ValueError: If selection_probability is 0.
    """
        self._population_size = population_size
        self._selection_probability = selection_probability

        # Initial capacity is the expected number of samples per epoch,
        # ceil(1 / selection_probability); a zero probability would divide by
        # zero, so it is rejected in both execution modes.
        if tf.executing_eagerly():
            if tf.equal(selection_probability, 0):
                raise ValueError('Selection probability cannot be 0.')
            init_capacity = tf.cast(tf.math.ceil(1 / selection_probability),
                                    tf.int32)
        else:
            if selection_probability == 0:
                raise ValueError('Selection probability cannot be 0.')
            # np.int was deprecated in NumPy 1.20 and removed in 1.24; the
            # builtin int is the documented replacement and is equivalent here.
            init_capacity = int(np.ceil(1 / selection_probability))

        # The query buffer stores rows corresponding to
        # P_exponentialSumQueryEntries. Each row holds
        # [exponents, samples_counts, clipping bound, dimension, beta],
        # hence the row shape [5].
        # NOTE(review): the original comment claimed a per-query size of 4
        # while listing 5 fields; the buffer shape [5] matches the field
        # list — confirm against the code that writes these rows.
        self._query_buffer = tensor_buffer.TensorBuffer(
            init_capacity, [5], tf.float32, 'query')
        # _sample_var is scratch state for the sample currently being recorded,
        # distinct from _sample_buffer (the history of finished samples) and
        # _sample_count (a running counter). The original note described it as
        # [exponents, population_size, selection_probability, query_count],
        # which is 4 fields for a 3-element variable — presumably only 3 of
        # them are stored here; TODO confirm against the writer.

        self._sample_var = tf.Variable(initial_value=tf.zeros([3]),
                                       trainable=False,
                                       name='sample')
        # The sample buffer stores rows corresponding to SampleEntries.
        self._sample_buffer = tensor_buffer.TensorBuffer(
            init_capacity, [3], tf.float32, 'sample')
        self._sample_count = tf.Variable(initial_value=0.0,
                                         trainable=False,
                                         name='sample_count')
        self._query_count = tf.Variable(initial_value=0.0,
                                        trainable=False,
                                        name='query_count')
        # Critical section serializing concurrent ledger updates.
        self._cs = tf.CriticalSection()
Example #3
0
    def __init__(self, population_size, selection_probability):
        """Initializes the PrivacyLedger.

    Args:
      population_size: An integer (may be variable) specifying the size of the
        population, i.e. size of the training data used in each epoch.
      selection_probability: A floating point value (may be variable) specifying
        the probability each record is included in a sample.

    Raises:
      ValueError: If `selection_probability` is 0.
    """
        self._population_size = population_size
        self._selection_probability = selection_probability

        # Initial capacity is the expected number of samples per epoch,
        # ceil(1 / selection_probability); a zero probability would divide by
        # zero, so it is rejected in both execution modes.
        if tf.executing_eagerly():
            if tf.equal(selection_probability, 0):
                raise ValueError('Selection probability cannot be 0.')
            init_capacity = tf.cast(tf.math.ceil(1 / selection_probability),
                                    tf.int32)
        else:
            if selection_probability == 0:
                raise ValueError('Selection probability cannot be 0.')
            # np.int was deprecated in NumPy 1.20 and removed in 1.24; the
            # builtin int is the documented replacement and is equivalent here.
            init_capacity = int(np.ceil(1 / selection_probability))

        # The query buffer stores rows corresponding to GaussianSumQueryEntries.
        self._query_buffer = tensor_buffer.TensorBuffer(
            init_capacity, [3], tf.float32, 'query')
        # Scratch state for the sample currently being recorded.
        self._sample_var = tf.Variable(initial_value=tf.zeros([3]),
                                       trainable=False,
                                       name='sample')

        # The sample buffer stores rows corresponding to SampleEntries.
        self._sample_buffer = tensor_buffer.TensorBuffer(
            init_capacity, [3], tf.float32, 'sample')
        self._sample_count = tf.Variable(initial_value=0.0,
                                         trainable=False,
                                         name='sample_count')
        self._query_count = tf.Variable(initial_value=0.0,
                                        trainable=False,
                                        name='query_count')
        # Critical section serializing concurrent ledger updates.
        self._cs = tf.CriticalSection()
Example #4
0
    def test_basic(self):
        """Sequential appends accumulate rows in insertion order."""
        capacity = 2
        row_shape = [2, 3]

        buf = tensor_buffer.TensorBuffer(capacity, row_shape, name='my_buffer')

        first = [[1, 2, 3], [4, 5, 6]]
        buf.append(first)
        self.assertAllEqual(buf.values.numpy(), [first])

        second = [[4, 5, 6], [7, 8, 9]]
        buf.append(second)
        self.assertAllEqual(buf.values.numpy(), [first, second])
  def test_noresize(self):
    """Test buffer does not resize if capacity is not exceeded."""
    with self.cached_session() as sess:
      limit, row_shape = 2, [2, 3]

      buf = tensor_buffer.TensorBuffer(limit, row_shape, name='my_buffer')
      first_row = [[1, 2, 3], [4, 5, 6]]
      # Chain the two appends via control dependencies so they run in order
      # before the buffer's state is read (graph mode).
      with tf.control_dependencies([buf.append(first_row)]):
        second_row = [[7, 8, 9], [10, 11, 12]]
        with tf.control_dependencies([buf.append(second_row)]):
          values = buf.values
          current_size = buf.current_size
          capacity = buf.capacity
      self.evaluate(tf.global_variables_initializer())

      contents, size_after, cap_after = sess.run(
          [values, current_size, capacity])
      self.assertAllEqual(contents, [first_row, second_row])
      self.assertEqual(size_after, 2)
      # Two appends into a capacity-2 buffer: no growth expected.
      self.assertEqual(cap_after, 2)
Example #6
0
    def test_resize(self):
        """Appending past capacity succeeds and doubles the capacity."""
        initial_capacity = 2
        row_shape = [2, 3]

        buf = tensor_buffer.TensorBuffer(initial_capacity, row_shape,
                                         name='my_buffer')

        # Append three buffers. Third one should succeed after resizing.
        rows = [
            [[1, 2, 3], [4, 5, 6]],
            [[4, 5, 6], [7, 8, 9]],
            [[7, 8, 9], [10, 11, 12]],
        ]

        buf.append(rows[0])
        self.assertAllEqual(buf.values.numpy(), rows[:1])
        self.assertAllEqual(buf.current_size.numpy(), 1)
        self.assertAllEqual(buf.capacity.numpy(), 2)

        buf.append(rows[1])
        self.assertAllEqual(buf.values.numpy(), rows[:2])
        self.assertAllEqual(buf.current_size.numpy(), 2)
        self.assertAllEqual(buf.capacity.numpy(), 2)

        buf.append(rows[2])
        self.assertAllEqual(buf.values.numpy(), rows)
        self.assertAllEqual(buf.current_size.numpy(), 3)
        # Capacity should have doubled.
        self.assertAllEqual(buf.capacity.numpy(), 4)
Example #7
0
 def test_fail_on_scalar(self):
     """Constructing a TensorBuffer with a scalar shape raises ValueError."""
     # assertRaisesRegexp was removed in Python 3.12; assertRaisesRegex is
     # the supported spelling and behaves identically.
     with self.assertRaisesRegex(ValueError, 'Shape cannot be scalar.'):
         tensor_buffer.TensorBuffer(1, ())