def __init__(self, population_size, selection_probability):
  """Initialize the PrivacyLedger.

  Args:
    population_size: An integer (may be variable) specifying the size of the
      population, i.e. size of the training data used in each epoch.
    selection_probability: A float (may be variable) specifying the
      probability each record is included in a sample.

  Raises:
    ValueError: If selection_probability is 0.
  """
  self._population_size = population_size
  self._selection_probability = selection_probability

  if tf.executing_eagerly():
    if tf.equal(selection_probability, 0):
      raise ValueError('Selection probability cannot be 0.')
    init_capacity = tf.cast(tf.ceil(1 / selection_probability), tf.int32)
  else:
    if selection_probability == 0:
      raise ValueError('Selection probability cannot be 0.')
    init_capacity = np.int(np.ceil(1 / selection_probability))

  # The query buffer stores rows corresponding to GaussianSumQueryEntries.
  self._query_buffer = tensor_buffer.TensorBuffer(
      init_capacity, [3], tf.float32, 'query')
  self._sample_var = tf.Variable(
      initial_value=tf.zeros([3]), trainable=False, name='sample')

  # The sample buffer stores rows corresponding to SampleEntries.
  self._sample_buffer = tensor_buffer.TensorBuffer(
      init_capacity, [3], tf.float32, 'sample')
  self._sample_count = tf.Variable(
      initial_value=0.0, trainable=False, name='sample_count')
  self._query_count = tf.Variable(
      initial_value=0.0, trainable=False, name='query_count')

  try:
    # Newer versions of TF
    self._cs = tf.CriticalSection()
  except AttributeError:
    # Older versions of TF
    self._cs = tf.contrib.framework.CriticalSection()
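# Usage sketch (not part of the library source): constructing the ledger defined
# above in eager mode. The numbers are illustrative, and the import path is an
# assumption based on the tensorflow_privacy package layout.
from tensorflow_privacy.privacy.analysis.privacy_ledger import PrivacyLedger

ledger = PrivacyLedger(population_size=60000, selection_probability=0.01)
# With selection_probability = 0.01, both buffers start with capacity
# ceil(1 / 0.01) = 100 rows and grow as appends exceed that size.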
def test_fail_on_inconsistent_shape(self):
  size, shape = 1, [2, 3]

  my_buffer = tensor_buffer.TensorBuffer(size, shape, name='my_buffer')

  with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                              'Appending value of inconsistent shape.'):
    my_buffer.append(tf.ones(shape=[3, 4], dtype=tf.int32))
def __init__(self,
             population_size,
             selection_probability=None,
             max_samples=None,
             max_queries=None):
  """Initialize the PrivacyLedger.

  Args:
    population_size: An integer (may be variable) specifying the size of the
      population, i.e. size of the training data used in each epoch.
    selection_probability: A float (may be variable) specifying the
      probability each record is included in a sample.
    max_samples: The maximum number of samples. An exception is thrown if
      more than this many samples are recorded.
    max_queries: The maximum number of queries. An exception is thrown if
      more than this many queries are recorded.
  """
  self._population_size = population_size
  self._selection_probability = selection_probability

  if max_samples is None:
    max_samples = 1000 * population_size
  if max_queries is None:
    max_queries = 1000 * population_size

  # The query buffer stores rows corresponding to GaussianSumQueryEntries.
  self._query_buffer = tensor_buffer.TensorBuffer(
      max_queries, [3], tf.float32, 'query')
  self._sample_var = tf.Variable(
      initial_value=tf.zeros([3]), trainable=False, name='sample')

  # The sample buffer stores rows corresponding to SampleEntries.
  self._sample_buffer = tensor_buffer.TensorBuffer(
      max_samples, [3], tf.float32, 'sample')
  self._sample_count = tf.Variable(
      initial_value=0.0, trainable=False, name='sample_count')
  self._query_count = tf.Variable(
      initial_value=0.0, trainable=False, name='query_count')

  try:
    # Newer versions of TF
    self._cs = tf.CriticalSection()
  except AttributeError:
    # Older versions of TF
    self._cs = tf.contrib.framework.CriticalSection()
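# Usage sketch for the constructor variant above that takes explicit caps
# (illustrative numbers): passing max_samples/max_queries overrides the default
# of 1000 * population_size for both buffers.
ledger = PrivacyLedger(
    population_size=60000,
    selection_probability=0.01,
    max_samples=5000,    # at most 5000 SampleEntries may be recorded
    max_queries=50000)   # at most 50000 GaussianSumQueryEntries may be recorded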
def test_basic(self):
  size, shape = 2, [2, 3]

  my_buffer = tensor_buffer.TensorBuffer(size, shape, name='my_buffer')

  value1 = [[1, 2, 3], [4, 5, 6]]
  my_buffer.append(value1)
  self.assertAllEqual(my_buffer.values.numpy(), [value1])

  value2 = [[4, 5, 6], [7, 8, 9]]
  my_buffer.append(value2)
  self.assertAllEqual(my_buffer.values.numpy(), [value1, value2])
def test_fail_on_overflow(self):
  size, shape = 2, [2, 3]
  my_buffer = tensor_buffer.TensorBuffer(size, shape, name='my_buffer')

  # First two should succeed.
  my_buffer.append(tf.ones(shape=shape, dtype=tf.int32))
  my_buffer.append(tf.ones(shape=shape, dtype=tf.int32))

  # Third one should fail.
  with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                              'Appending past end of TensorBuffer.'):
    my_buffer.append(tf.ones(shape=shape, dtype=tf.int32))
def test_noresize(self):
  """Test buffer does not resize if capacity is not exceeded."""
  with self.cached_session() as sess:
    size, shape = 2, [2, 3]

    my_buffer = tensor_buffer.TensorBuffer(size, shape, name='my_buffer')
    value1 = [[1, 2, 3], [4, 5, 6]]
    with tf.control_dependencies([my_buffer.append(value1)]):
      value2 = [[7, 8, 9], [10, 11, 12]]
      with tf.control_dependencies([my_buffer.append(value2)]):
        values = my_buffer.values
        current_size = my_buffer.current_size
        capacity = my_buffer.capacity
    self.evaluate(tf.global_variables_initializer())

    v, cs, cap = sess.run([values, current_size, capacity])
    self.assertAllEqual(v, [value1, value2])
    self.assertEqual(cs, 2)
    self.assertEqual(cap, 2)
def test_resize(self):
  size, shape = 2, [2, 3]
  my_buffer = tensor_buffer.TensorBuffer(size, shape, name='my_buffer')

  # Append three buffers. Third one should succeed after resizing.
  value1 = [[1, 2, 3], [4, 5, 6]]
  my_buffer.append(value1)
  self.assertAllEqual(my_buffer.values.numpy(), [value1])
  self.assertAllEqual(my_buffer.current_size.numpy(), 1)
  self.assertAllEqual(my_buffer.capacity.numpy(), 2)

  value2 = [[4, 5, 6], [7, 8, 9]]
  my_buffer.append(value2)
  self.assertAllEqual(my_buffer.values.numpy(), [value1, value2])
  self.assertAllEqual(my_buffer.current_size.numpy(), 2)
  self.assertAllEqual(my_buffer.capacity.numpy(), 2)

  value3 = [[7, 8, 9], [10, 11, 12]]
  my_buffer.append(value3)
  self.assertAllEqual(my_buffer.values.numpy(), [value1, value2, value3])
  self.assertAllEqual(my_buffer.current_size.numpy(), 3)
  # Capacity should have doubled.
  self.assertAllEqual(my_buffer.capacity.numpy(), 4)
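# Sketch (not one of the tests above): observing the same growth behavior
# interactively in eager mode. The import path is an assumption; the expected
# capacities follow from test_resize, which shows the capacity doubling when an
# append overflows the current backing storage.
import tensorflow as tf
from tensorflow_privacy.privacy.analysis import tensor_buffer

buf = tensor_buffer.TensorBuffer(2, [2, 3], name='growth_demo')
for i in range(5):
  buf.append(tf.fill([2, 3], i))
  print(buf.current_size.numpy(), buf.capacity.numpy())
# Expected capacities: 2, 2, 4, 4, 8 if each resize doubles the capacity.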
def test_fail_on_scalar(self):
  with self.assertRaisesRegex(ValueError, 'Shape cannot be scalar.'):
    tensor_buffer.TensorBuffer(1, ())