def test_noise_shape_correctness(self):
     """Broadcast noise shapes drop whole rows / whole columns at once."""
     inputs = np.random.normal(size=(4, 16)).astype(np.float32)
     # noise_shape=[4, 1] shares one mask value per row, so entire rows
     # are zeroed together.
     row_dropped = self.evaluate(
         stateless_dropout_lib.stateless_dropout(
             inputs, 0.8, seed=[2, 4], noise_shape=[4, 1]))
     # noise_shape=[1, 16] shares one mask value per column.
     col_dropped = self.evaluate(
         stateless_dropout_lib.stateless_dropout(
             inputs, 0.8, seed=[2, 4], noise_shape=[1, 16]))
     # At rate 0.8 we expect most of the 4 rows and 16 columns to be
     # zeroed wholesale; bounds leave slack for the fixed seed.
     self.assertBetween(np.sum(np.sum(row_dropped, axis=-1) == 0), 1, 3)
     self.assertBetween(np.sum(np.sum(col_dropped, axis=0) == 0), 5, 15)
 def f(x, seed=np.array(1, dtype=np.int32)):
     """Runs `model` on `x`, then stateless dropout keyed on `seed`.

     NOTE(review): the np.array default is a mutable default argument;
     it is never mutated here, so behavior is unaffected.
     """
     ctx = recompute_grad_lib.get_recompute_context()
     if ctx is not None:
         # Under recomputation, reuse the context's seed so the same
         # dropout mask is regenerated.
         seed = ctx.seed
     return stateless_dropout_lib.stateless_dropout(
         model(x), rate=0.5, seed=tf.stack([seed, 0]))
 def dropped_inputs():
   """Randomly drops elements of `inputs` when `training=True`."""
   ctx = recompute_grad_lib.get_recompute_context()
   if ctx is not None:
     # Recomputation pass: combine the context seed with this layer's
     # private seed so the recomputed mask matches the forward pass.
     combined_seed = tf.stack([ctx.seed, self._recompute_seed])
     return stateless_dropout_lib.stateless_dropout(
         inputs,
         rate=self.rate,
         seed=combined_seed,
         noise_shape=self._get_noise_shape(inputs))
   # No recompute context: fall back to ordinary stateful dropout,
   # unless the layer insists on running under recomputation.
   if self.force_recomputation:
     raise ValueError(
         'RecomputeContext is required when force_recomputation=True.')
   return tf.nn.dropout(
       inputs,
       noise_shape=self._get_noise_shape(inputs),
       seed=self.seed,
       rate=self.rate)
 def f(x):
     """Forward pass through tanh layers with per-layer stateless dropout.

     Derives one base seed (fresh on the forward pass, replayed from the
     recompute context otherwise) and expands it into a deterministic
     seed pair per layer.
     """
     ctx = recompute_grad_lib.get_recompute_context()
     if ctx is None:
         gen = tf.random.experimental.get_global_generator()
         base_seed = tf.stack(
             (gen.uniform_full_int([], tf.int32, name='seed'), 0))
     else:
         base_seed = tf.stack((ctx.seed, 0))
     # One (2,) seed per layer; stateless, so identical base_seed values
     # always reproduce identical dropout masks.
     layer_seeds = tf.random.stateless_uniform([len(self._units), 2],
                                               base_seed,
                                               minval=-2**31,
                                               maxval=2**31 - 1,
                                               dtype=tf.int32,
                                               name='dropout_seeds')
     for layer_idx, (kernel, bias) in enumerate(
             zip(self._kernels, self._biases)):
         x = tf.nn.tanh(tf.matmul(x, kernel) + bias)
         x = stateless_dropout_lib.stateless_dropout(
             x, self._rate, layer_seeds[layer_idx])
     return x
 def test_deterministic(self):
     """Identical seeds must yield identical dropout masks."""
     inputs = np.random.normal(size=(4, 8)).astype(np.float32)
     first = self.evaluate(
         stateless_dropout_lib.stateless_dropout(inputs, 0.5, seed=[2, 4]))
     second = stateless_dropout_lib.stateless_dropout(
         inputs, 0.5, seed=[2, 4])
     self.assertAllEqual(second, first)
 def test_rate_correctness(self):
     """A rate of 0.8 zeroes roughly 80% of the elements."""
     inputs = np.random.normal(size=(4, 8)).astype(np.float32)
     dropped = self.evaluate(
         stateless_dropout_lib.stateless_dropout(inputs, 0.8, seed=[2, 4]))
     # 32 elements at rate 0.8 -> ~25.6 zeros expected; wide bounds
     # keep the fixed-seed check robust.
     self.assertBetween(np.sum(dropped == 0), 10, 31)
 def test_rate_zero(self):
     """Rate 0 is the identity: nothing dropped, nothing rescaled."""
     inputs = np.random.normal(size=(4, 8)).astype(np.float32)
     result = stateless_dropout_lib.stateless_dropout(
         inputs, 0, seed=[1, 0])
     self.assertAllEqual(result, inputs)