  def testFunctionalReuseFromScope(self):
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
    epsilon = 1e-3
    training = array_ops.placeholder(dtype='bool')
    with variable_scope.variable_scope('scope'):
      _ = normalization_layers.batch_norm(
          inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
      # Five global variables: `inputs` plus the layer's gamma, beta,
      # moving_mean, and moving_variance.
      self.assertEqual(len(variables.global_variables()), 5)
    with variable_scope.variable_scope('scope', reuse=True):
      _ = normalization_layers.batch_norm(
          inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
      # Reusing the scope shares the existing variables; none are added.
      self.assertEqual(len(variables.global_variables()), 5)

  def testFunctionalReuse(self):
    inputs1 = variables.Variable(
        np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
    inputs2 = variables.Variable(
        np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
    epsilon = 1e-3
    training = array_ops.placeholder(dtype='bool')
    _ = normalization_layers.batch_norm(
        inputs1, axis=-1, momentum=0.9, epsilon=epsilon,
        training=training, name='bn')
    outputs2 = normalization_layers.batch_norm(
        inputs2, axis=-1, momentum=0.9, epsilon=epsilon,
        training=training, name='bn', reuse=True)

    # Keep only the last two update ops, the ones added by the reused call.
    updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)[-2:]
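    # Both calls share variables, so either pair of ops refreshes the same
    # statistics. Assuming the standard moving-average update, each op does:
    #   moving_stat = momentum * moving_stat + (1 - momentum) * batch_stat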
    all_vars = dict([(v.name, v) for v in variables.global_variables()])
    moving_mean = all_vars['bn/moving_mean:0']
    moving_variance = all_vars['bn/moving_variance:0']
    beta = all_vars['bn/beta:0']
    gamma = all_vars['bn/gamma:0']

    with self.test_session() as sess:
      # Test training with placeholder learning phase.
      sess.run(variables.global_variables_initializer())
      for _ in range(100):
        np_output, _, _ = sess.run([outputs2] + updates,
                                   feed_dict={training: True})
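      # The inputs are a fixed Variable, so the batch statistics are the same
      # at every step; after 100 updates with momentum 0.9 the moving averages
      # are within a factor of 0.9**100 ~= 2.7e-5 of those statistics, well
      # inside the atol=1e-2 tolerance below.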

      # Verify that the statistics are updated during training.
      np_moving_mean, np_moving_var = sess.run([moving_mean, moving_variance])
      np_inputs = sess.run(inputs2)
      np_mean = np.mean(np_inputs, axis=(0, 1, 2))
      np_std = np.std(np_inputs, axis=(0, 1, 2))
      np_variance = np.square(np_std)
      self.assertAllClose(np_mean, np_moving_mean, atol=1e-2)
      self.assertAllClose(np_variance, np_moving_var, atol=1e-2)

      # Verify that the axis is normalized during training.
      np_gamma, np_beta = sess.run([gamma, beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
      np_beta = np.reshape(np_beta, (1, 1, 1, 6))
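      # Invert the layer's affine output, y = gamma * x_hat + beta, to recover
      # the normalized tensor x_hat.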
      normed_np_output = (np_output - np_beta) / np_gamma
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Test inference with placeholder learning phase.
      np_output = sess.run(outputs2, feed_dict={training: False})

      # Verify that the axis is normalized during inference: the layer now
      # normalizes with the converged moving statistics, so inverting the
      # affine output again gives roughly zero mean and unit variance.
      normed_np_output = (np_output - np_beta) / np_gamma
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def testFunctionalNoReuse(self):
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
    epsilon = 1e-3
    training = array_ops.placeholder(dtype='bool')
    outputs = normalization_layers.batch_norm(
        inputs,
        axis=-1,
        momentum=0.9,
        epsilon=epsilon,
        training=training,
        name='bn')

    updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
    all_vars = dict([(v.name, v) for v in variables.global_variables()])
    moving_mean = all_vars['bn/moving_mean:0']
    moving_variance = all_vars['bn/moving_variance:0']
    beta = all_vars['bn/beta:0']
    gamma = all_vars['bn/gamma:0']

    with self.test_session() as sess:
      # Test training with placeholder learning phase.
      sess.run(variables.global_variables_initializer())
      np_gamma, np_beta = sess.run([gamma, beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
      np_beta = np.reshape(np_beta, (1, 1, 1, 6))
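      # gamma and beta are never trained here (no optimizer runs), so they
      # keep their initial values (ones and zeros) and it is safe to fetch
      # them once before the loop.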
      for _ in range(100):
        np_output, _, _ = sess.run([outputs] + updates,
                                   feed_dict={training: True})
        # Verify that the axis is normalized during training.
        normed_np_output = (np_output - np_beta) / np_gamma
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Verify that the statistics are updated during training.
      np_moving_mean, np_moving_var = sess.run([moving_mean, moving_variance])
      np_inputs = sess.run(inputs)
      np_mean = np.mean(np_inputs, axis=(0, 1, 2))
      np_std = np.std(np_inputs, axis=(0, 1, 2))
      np_variance = np.square(np_std)
      self.assertAllClose(np_mean, np_moving_mean, atol=1e-2)
      self.assertAllClose(np_variance, np_moving_var, atol=1e-2)

      # Test inference with placeholder learning phase.
      np_output = sess.run(outputs, feed_dict={training: False})

      # Verify that the axis is normalized during inference.
      normed_np_output = (np_output - np_beta) / np_gamma
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)
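
  # Note: these tests run the moving-average update ops explicitly alongside
  # the layer output. A typical training loop instead attaches them to the
  # train op; a minimal sketch (assuming an `optimizer` and a `loss` tensor,
  # neither of which appears in these tests):
  #
  #   update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
  #   with ops.control_dependencies(update_ops):
  #     train_op = optimizer.minimize(loss)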