Example #1
 def testCreateVariables_NHWC(self):
   height, width = 3, 3
   images = random_ops.random_uniform((5, height, width, 8), seed=1)
   normalization_ops.group_norm(images,
                                groups=4,
                                channels_axis=-1,
                                center=True,
                                scale=True)
   beta = get_variables('beta')[0]
   gamma = get_variables('gamma')[0]
   self.assertEqual('GroupNorm/beta', beta.op.name)
   self.assertEqual('GroupNorm/gamma', gamma.op.name)
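The get_variables helper used throughout these tests is not shown in the snippets. A minimal sketch of what it presumably does, assuming TF1-style global variable collections and the same variables module the tests already import (hypothetical, not the project's actual helper):

def get_variables(name_suffix):
    # Hypothetical helper: collect global variables whose name contains
    # the given suffix, e.g. 'beta' or 'gamma'.
    return [v for v in variables.global_variables() if name_suffix in v.op.name]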
Example #2
 def testReuseVariables(self):
   height, width = 3, 3
   images = random_ops.random_uniform((5, height, width, 4), seed=1)
   normalization_ops.group_norm(images, groups=2, scale=True, scope='IN')
   normalization_ops.group_norm(images,
                                groups=2,
                                scale=True,
                                scope='IN',
                                reuse=True)
   beta = get_variables('beta')
   gamma = get_variables('gamma')
   self.assertEqual(1, len(beta))
   self.assertEqual(1, len(gamma))
Example #3
 def testValueCorrectWithReuseVars(self):
   height, width = 3, 3
   image_shape = (10, height, width, 4)
   images = random_ops.random_uniform(image_shape, seed=1)
   output_train = normalization_ops.group_norm(images, groups=2, scope='IN')
   output_eval = normalization_ops.group_norm(images,
                                              groups=2,
                                              scope='IN',
                                              reuse=True)
   with self.cached_session() as sess:
     sess.run(variables.global_variables_initializer())
     # output_train and output_eval should be the same.
     train_np, eval_np = sess.run([output_train, output_eval])
     self.assertAllClose(train_np, eval_np)
Example #4
def norm(x, opts, is_training=True, zero_gamma_init=False):
    # Initialise gamma/beta depending on the zero_gamma_init argument.
    if zero_gamma_init:
        p_init = {
            'gamma': tf.zeros_initializer(),
            'beta': tf.zeros_initializer()
        }
    else:
        p_init = {
            'gamma': tf.ones_initializer(),
            'beta': tf.zeros_initializer()
        }

    norm_type = ("GROUP" if opts["group_norm"] else
                 "BATCH" if opts["batch_norm"] else None)
    if norm_type == "BATCH":
        x = tf.layers.batch_normalization(x,
                                          fused=True,
                                          center=True,
                                          scale=True,
                                          training=is_training,
                                          trainable=True,
                                          momentum=opts["BN_decay"],
                                          epsilon=1e-5,
                                          gamma_initializer=p_init['gamma'],
                                          beta_initializer=p_init['beta'])
    elif norm_type == "GROUP":
        # Pass the custom gamma/beta initializers through to group norm.
        x = normalization_ops.group_norm(x,
                                         groups=opts["groups"],
                                         param_initializers=p_init)

    tf.add_to_collection("activations", x)
    return x
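The opts dictionary is defined elsewhere in that project; a minimal sketch of the keys this norm function reads, with illustrative values that are assumptions rather than the project's defaults:

opts = {
    "group_norm": True,   # select group norm over batch norm
    "batch_norm": False,
    "groups": 8,          # number of groups passed to group_norm
    "BN_decay": 0.97,     # batch-norm momentum (unused when group norm is selected)
}
normed = norm(x, opts, is_training=True, zero_gamma_init=False)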
Example #5
def norm(x: tf.Tensor,
         norm_type='BATCH',
         groups=32,
         training=False) -> tf.Tensor:
    """
    Batch-normalization

    :param x: (tf.Tensor) input tensor
    :param norm_type: (str) type of normalization
    :param groups: (int) size of group
    :param training: (boolean) training flag
    :return:
    """
    if norm_type == 'BATCH':
        # Perhaps use tf.nn.fused_batch_norm instead.
        x = tf.layers.batch_normalization(x,
                                          fused=True,
                                          center=True,
                                          scale=True,
                                          training=training,
                                          trainable=training,
                                          momentum=0.997,
                                          epsilon=1e-5)
    elif norm_type == 'GROUP':
        x = normalization_ops.group_norm(x,
                                         groups=groups,
                                         center=True,
                                         scale=True,
                                         training=training,
                                         trainable=training,
                                         channels_axis=-1)
    return x
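A hypothetical call, assuming an NHWC activation tensor whose channel count is divisible by groups (the placeholder shape is chosen for illustration only):

x = tf.placeholder(tf.float32, shape=(8, 32, 32, 64))
y = norm(x, norm_type='GROUP', groups=32, training=True)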
Example #6
 def testCreateOp(self):
   height, width, groups = 3, 3, 4
   images = random_ops.random_uniform((5, height, width, 2 * groups), seed=1)
   output = normalization_ops.group_norm(images,
                                         groups=groups,
                                         channels_axis=-1)
   self.assertListEqual([5, height, width, 2 * groups],
                        output.shape.as_list())
Example #7
 def norm(self, x, type='BATCH', groups=32, training=False):
     if type == 'BATCH':
         # Perhaps use tf.nn.fused_batch_norm instead.
         x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True,
                                           training=training, trainable=training,
                                           momentum=0.997, epsilon=1e-5)
     elif type == 'GROUP':
         x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True,
                                          training=training, trainable=training,
                                          channels_axis=-1)
     return x
Example #8
 def testCreateOpNoScaleCenter(self):
   height, width, groups = 3, 3, 7
   images = random_ops.random_uniform((5, height, width, 3 * groups),
                                      dtype=dtypes.float32,
                                      seed=1)
   output = normalization_ops.group_norm(images,
                                         groups=groups,
                                         center=False,
                                         scale=False)
   self.assertListEqual([5, height, width, 3 * groups],
                        output.shape.as_list())
   self.assertEqual(0, len(get_variables('beta')))
   self.assertEqual(0, len(get_variables('gamma')))
Example #9
def norm(x, is_training=True, norm_type=None, epsilon=1e-6, groups=None):
    if norm_type == 'batch':
        x = batch_norm(x,
                       center=True,
                       scale=True,
                       training=is_training,
                       trainable=True,
                       epsilon=epsilon)
    elif norm_type == 'group':
        x = normalization_ops.group_norm(x,
                                         groups=min(int(x.get_shape()[3]),
                                                    groups),
                                         epsilon=epsilon)
    else:
        raise ValueError("{} not a valid norm_type".format(norm_type))
    return x
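The min(...) clamp guards against requesting more groups than there are channels: with a 16-channel input and groups=32, group_norm is effectively called with groups=16, i.e. one channel per group. A hypothetical call, assuming batch_norm is a wrapper defined elsewhere in that project:

x = tf.placeholder(tf.float32, shape=(4, 32, 32, 16))
y = norm(x, is_training=True, norm_type='group', groups=32)  # effective groups = 16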
Example #10
def norm(x, opts, is_training=True):
    norm_type = 'GROUP' if opts[
        "group_norm"] else 'BATCH' if opts['batch_norm'] else None

    if norm_type == 'BATCH':
        x = tf.layers.batch_normalization(x,
                                          fused=True,
                                          center=True,
                                          scale=True,
                                          training=is_training,
                                          trainable=True,
                                          momentum=opts["BN_decay"],
                                          epsilon=1e-5)
    elif norm_type == 'GROUP':
        x = normalization_ops.group_norm(x, groups=opts['groups'])

    tf.add_to_collection('activations', x)
    return x
Example #11
  def testDefunInput(self):
    shape = [10, 10, 10, 30]

    @function.Defun()
    def f():
      return array_ops.ones(shape)

    inputs = f()
    inputs.set_shape(shape)
    output_op = normalization_ops.group_norm(inputs,
                                             groups=5,
                                             center=False,
                                             scale=False,
                                             channels_axis=-1,
                                             training=True)
    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      outputs = sess.run(output_op)
      self.assertAllClose(outputs, np.zeros(shape))
Example #12
def norm(x, opts, is_training=True):
    norm_type = ("GROUP" if opts["group_norm"] else
                 "BATCH" if opts["batch_norm"] else None)

    if norm_type == "BATCH":
        x = tf.layers.batch_normalization(
            x,
            fused=True,
            center=True,
            scale=True,
            training=is_training,
            trainable=True,
            momentum=opts["BN_decay"],
            epsilon=1e-5,
        )
    elif norm_type == "GROUP":
        x = normalization_ops.group_norm(x, groups=opts["groups"])

    tf.add_to_collection("activations", x)
    return x
Example #13
 def testParamsShapeNotFullyDefinedChannelsAxis(self):
   inputs = array_ops.placeholder(dtypes.float32, shape=(1, 3, 4, None))
   with self.assertRaisesRegex(ValueError, 'undefined channel dimension'):
     normalization_ops.group_norm(inputs, channels_axis=-1)
Example #14
  def doOutputTest(self,
                   input_shape,
                   channels_axis=None,
                   reduction_axes=None,
                   groups=2,
                   tol=1e-1):
    # Select the axis for the channel and the dimensions along which statistics
    # are accumulated.
    if channels_axis < 0:
      channels_axis += len(input_shape)
    reduced_axes = [channels_axis + 1]
    for a in reduction_axes:
      if a < 0:
        a += len(input_shape)
      if a < channels_axis:
        reduced_axes.append(a)
      else:
        reduced_axes.append(a + 1)
    reduced_axes = tuple(reduced_axes)
    channels = input_shape[channels_axis]
    group_size = channels // groups
    # Calculate the final shape for the output Tensor.
    axes_before_channels = input_shape[:channels_axis]
    axes_after_channels = input_shape[channels_axis + 1:]
    outputs_shape = (axes_before_channels + [groups, group_size] +
                     axes_after_channels)

    # Calculate the final shape for the output statistics.
    reduced_shape = []
    for i, a in enumerate(outputs_shape):
      if i not in reduced_axes:
        reduced_shape.append(a)

    mu = 1.0
    sigma = 1.0
    # After normalization, each group should have zero mean and unit variance.
    expected_mean = np.zeros(reduced_shape)
    expected_var = np.ones(reduced_shape)

    inputs = random_ops.random_normal(input_shape, seed=0) * sigma + mu
    output_op = normalization_ops.group_norm(inputs,
                                             groups=groups,
                                             center=False,
                                             scale=False,
                                             channels_axis=channels_axis,
                                             training=True)

    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      outputs = sess.run(output_op)

      # Make sure that there are no NaNs
      self.assertFalse(np.isnan(outputs).any())

      # Implementation detail - in Poplibs group norm, the groups are not
      # contiguous, but strided - we replicate that here
      # Move the channels to the first dimension for inputs, gamma and beta
      outputs = np.swapaxes(outputs, 0, channels_axis)
      reshuffled_outputs = np.empty(outputs.shape, outputs.dtype)
      for from_idx in range(channels):
        to_idx = (from_idx % groups) * group_size + from_idx // groups
        reshuffled_outputs[to_idx] = outputs[from_idx]
      outputs = np.swapaxes(reshuffled_outputs, 0, channels_axis)

      outputs = np.reshape(outputs, outputs_shape)
      mean = np.mean(outputs, axis=reduced_axes, dtype=np.float32)
      var = np.var(outputs, axis=reduced_axes, dtype=np.float32)
      # The mean and variance of each example should be close to 0 and 1
      # respectively.
      self.assertAllClose(expected_mean, mean, rtol=tol, atol=tol)
      self.assertAllClose(expected_var, var, rtol=tol, atol=tol)
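The reshuffling loop above mirrors the strided (non-contiguous) group layout used by Poplibs. A small sketch of the same index permutation with illustrative values (6 channels, 2 groups, group_size = 3): channels 0, 2 and 4 land in group 0, channels 1, 3 and 5 in group 1.

groups, group_size = 2, 3
mapping = [(c % groups) * group_size + c // groups
           for c in range(groups * group_size)]
print(mapping)  # [0, 3, 1, 4, 2, 5]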
Example #15
 def testInvalidGroupSize(self):
   inputs = array_ops.placeholder(dtypes.float32, shape=(5, 2, 10, 10))
   with self.assertRaisesRegex(ValueError,
                               'Invalid groups 10 for 2 channels.'):
     normalization_ops.group_norm(inputs, groups=10, channels_axis=-3)
Example #16
 def testBadCommensurateGroup(self):
   inputs = array_ops.placeholder(dtypes.float32, shape=(5, 4, 10, 10))
   with self.assertRaisesRegex(
       ValueError, '4 channels is not commensurate with '
       '3 groups.'):
     normalization_ops.group_norm(inputs, groups=3, channels_axis=-3)
Example #17
 def testAxisIsBad(self):
   inputs = array_ops.placeholder(dtypes.float32, shape=(1, 2, 4, 5))
   with self.assertRaisesRegex(ValueError, 'Axis is out of bounds.'):
     normalization_ops.group_norm(inputs, channels_axis=5)
Example #18
 def testUnknownShape(self):
   inputs = array_ops.placeholder(dtypes.float32)
   with self.assertRaisesRegex(ValueError, 'undefined rank'):
     normalization_ops.group_norm(inputs)