def testCreateVariables_NCHW(self):
  """Group norm over NCHW input creates beta/gamma with the default scope names.

  Builds a 4-D NCHW batch (channels on axis -3, spatial axes -2/-1) and
  checks that enabling both `center` and `scale` creates exactly the
  'GroupNorm/beta' and 'GroupNorm/gamma' variables.
  """
  height, width, groups = 3, 3, 4
  images = random_ops.random_uniform((5, 2 * groups, height, width), seed=1)
  # Pass the `groups` variable instead of a hard-coded 4 so the call stays
  # consistent with the shape built above (and with the sibling tests).
  normalization.group_norm(
      images,
      groups=groups,
      channels_axis=-3,
      reduction_axes=(-2, -1),
      center=True,
      scale=True)
  beta = contrib_variables.get_variables_by_name('beta')[0]
  gamma = contrib_variables.get_variables_by_name('gamma')[0]
  self.assertEqual('GroupNorm/beta', beta.op.name)
  self.assertEqual('GroupNorm/gamma', gamma.op.name)
def testCreateOpFloat64(self):
  """float64 inputs keep their dtype and static shape through group_norm."""
  height, width, groups = 3, 3, 5
  input_shape = (5, height, width, 4 * groups)
  images = random_ops.random_uniform(
      input_shape, dtype=dtypes.float64, seed=1)
  output = normalization.group_norm(images, groups=groups)
  # Neither the dtype nor the static shape should change.
  self.assertEqual(dtypes.float64, output.dtype)
  self.assertListEqual(list(input_shape), output.shape.as_list())
def testParamsShapeNotFullyDefinedBatchAxis(self):
  """An unknown batch dimension is accepted and propagated to the output."""
  height, width, groups = 3, 3, 4
  # Only the batch axis is undefined; channels and reduction axes are known.
  partial_shape = (None, height, width, 2 * groups)
  inputs = array_ops.placeholder(dtypes.float32, shape=partial_shape)
  output = normalization.group_norm(
      inputs, channels_axis=-1, reduction_axes=[-3, -2], groups=groups)
  self.assertListEqual(list(partial_shape), output.shape.as_list())
def testCreateOp(self):
  """group_norm output has the same static shape as its NHWC input."""
  height, width, groups = 3, 3, 4
  images = random_ops.random_uniform((5, height, width, 2 * groups), seed=1)
  output = normalization.group_norm(
      images, groups=groups, channels_axis=-1, reduction_axes=[-3, -2])
  # NOTE: a leftover debug `print` of output.op.name was removed; it only
  # polluted test logs and asserted nothing.
  self.assertListEqual([5, height, width, 2 * groups], output.shape.as_list())
def testCreateOpNoScaleCenter(self):
  """Disabling center/scale creates no beta or gamma variables."""
  height, width, groups = 3, 3, 7
  images = random_ops.random_uniform(
      (5, height, width, 3 * groups), dtype=dtypes.float32, seed=1)
  output = normalization.group_norm(
      images, groups=groups, center=False, scale=False)
  self.assertListEqual(
      [5, height, width, 3 * groups], output.shape.as_list())
  # With center=False and scale=False no trainable offset/scale variables
  # should have been created under any scope.
  for unexpected in ('beta', 'gamma'):
    self.assertEqual(
        0, len(contrib_variables.get_variables_by_name(unexpected)))
def testNotMutuallyExclusiveAxis(self):
  """channels_axis overlapping reduction_axes must raise ValueError."""
  inputs = array_ops.placeholder(dtypes.float32, shape=(10, 32, 32, 32))
  # Each (channels_axis, reduction_axes) pair resolves to the same axis on
  # this rank-4 input: all-negative, all-positive, and mixed-sign specs.
  overlapping_cases = [
      (-2, [-2]),
      (1, [1, 3]),
      (-2, [2]),
  ]
  for channels_axis, reduction_axes in overlapping_cases:
    with self.assertRaisesRegexp(ValueError, 'mutually exclusive'):
      normalization.group_norm(
          inputs, channels_axis=channels_axis, reduction_axes=reduction_axes)
def doOutputTest(self, input_shape, channels_axis=None, reduction_axes=None, mean_close_to_zero=False, groups=2, tol=1e-2):
  """Runs group_norm on random inputs and checks the output statistics.

  For a grid of input means/stddevs, normalizes random data with
  `normalization.group_norm` (no center/scale) and asserts that, within each
  (group, non-reduced-axes) slice, the output mean is ~0 and variance ~1.

  Args:
    input_shape: list of ints, full static shape of the test input.
    channels_axis: axis holding channels; may be negative.
      NOTE(review): the default of None would fail the `< 0` comparison
      below — callers appear to always pass an int; confirm.
    reduction_axes: axes over which statistics are accumulated; may be
      negative. Same caveat as `channels_axis` regarding the None default.
    mean_close_to_zero: forwarded to group_norm; also selects a grid of
      small means/stddevs instead of large ones.
    groups: number of groups to normalize over.
    tol: rtol/atol used for the mean and variance comparisons.
  """
  # Select the axis for the channel and the dimensions along which statistics
  # are accumulated.
  if channels_axis < 0:
    channels_axis += len(input_shape)
  # Axes are remapped into the *reshaped* output space, where the channels
  # axis is split into [groups, channels // groups]; hence the +1 shifts.
  reduced_axes = [channels_axis + 1]
  for a in reduction_axes:
    if a < 0:
      a += len(input_shape)
    if a < channels_axis:
      reduced_axes.append(a)
    else:
      reduced_axes.append(a + 1)
  reduced_axes = tuple(reduced_axes)
  # Calculate the final shape for the output Tensor.
  axes_before_channels = input_shape[:channels_axis]
  axes_after_channels = input_shape[channels_axis + 1:]
  channels = input_shape[channels_axis]
  outputs_shape = (axes_before_channels + [groups, channels // groups] +
                   axes_after_channels)
  # Calculate the final shape for the output statistics: dimensions that
  # survive after reducing over `reduced_axes`.
  reduced_shape = []
  for i, a in enumerate(outputs_shape):
    if i not in reduced_axes:
      reduced_shape.append(a)
  # Choose the grid of input means/stddevs to sweep over.
  if mean_close_to_zero:
    mu_tuple = (1e-4, 1e-2, 1.0)
    sigma_tuple = (1e-2, 0.1, 1.0)
  else:
    mu_tuple = (1.0, 1e2)
    sigma_tuple = (1.0, 0.1)
  for mu in mu_tuple:
    for sigma in sigma_tuple:
      # Determine shape of Tensor after normalization.
      expected_mean = np.zeros(reduced_shape)
      expected_var = np.ones(reduced_shape)
      # Shift/scale standard normal noise to the target mean/stddev.
      inputs = random_ops.random_normal(input_shape, seed=0) * sigma + mu
      output_op = normalization.group_norm(
          inputs,
          groups=groups,
          center=False,
          scale=False,
          channels_axis=channels_axis,
          reduction_axes=reduction_axes,
          mean_close_to_zero=mean_close_to_zero)
      with self.cached_session() as sess:
        sess.run(variables.global_variables_initializer())
        outputs = sess.run(output_op)
        # Make sure that there are no NaNs
        self.assertFalse(np.isnan(outputs).any())
        # Reshape so the per-group statistics can be reduced directly.
        outputs = np.reshape(outputs, outputs_shape)
        mean = np.mean(outputs, axis=reduced_axes)
        var = np.var(outputs, axis=reduced_axes)
        # The mean and variance of each example should be close to 0 and 1
        # respectively.
        self.assertAllClose(expected_mean, mean, rtol=tol, atol=tol)
        self.assertAllClose(expected_var, var, rtol=tol, atol=tol)
def testParamsShapeNotFullyDefinedReductionAxes(self):
  """An undefined reduction dimension is rejected with ValueError."""
  # Axis 2 (a reduction axis under the defaults) has unknown size.
  partial_inputs = array_ops.placeholder(
      dtypes.float32, shape=(1, 32, None, 4))
  with self.assertRaisesRegexp(ValueError, 'undefined dimensions'):
    normalization.group_norm(partial_inputs)
def testUnknownShape(self):
  """A placeholder with fully unknown rank is rejected with ValueError."""
  # No `shape` argument at all: rank itself is undefined.
  rankless_inputs = array_ops.placeholder(dtypes.float32)
  with self.assertRaisesRegexp(ValueError, 'undefined rank'):
    normalization.group_norm(rankless_inputs)
def testAxisIsBad(self):
  """Axes beyond the input rank raise 'Axis is out of bounds.'"""
  inputs = array_ops.placeholder(dtypes.float32, shape=(1, 2, 4, 5))
  # The input is rank 4, so axis 5 is invalid whether it appears as the
  # channels axis or inside the reduction axes.
  with self.assertRaisesRegexp(ValueError, 'Axis is out of bounds.'):
    normalization.group_norm(inputs, channels_axis=5)
  with self.assertRaisesRegexp(ValueError, 'Axis is out of bounds.'):
    normalization.group_norm(inputs, reduction_axes=[1, 5])