Example #1
 def testMovingAverageVariables(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         ops.batch_norm(images, scale=True)
         moving_mean = tf.moving_average_variables()[0]
         moving_variance = tf.moving_average_variables()[1]
         self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
         self.assertEquals(moving_variance.op.name,
                           'BatchNorm/moving_variance')
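All of the snippets on this page exercise a TF 1.x-era, TF-Slim style batch_norm and assume a common preamble of imports. A minimal sketch of that preamble, where the inception.slim module paths are an assumption inferred from the identifiers used:

 # Assumed preamble for the examples below (TensorFlow 1.x, graph mode).
 # The inception.slim paths are a guess; the snippets only need `ops`,
 # `scopes`, `losses` and `variables` to expose the TF-Slim helpers used.
 import numpy as np
 import tensorflow as tf
 from tensorflow.python.ops import control_flow_ops

 from inception.slim import losses
 from inception.slim import ops
 from inception.slim import scopes
 from inception.slim import variables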
Example #2
 def testReuseUpdateOps(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         ops.batch_norm(images, scope='bn')
         self.assertEquals(
             len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2)
         ops.batch_norm(images, scope='bn', reuse=True)
         self.assertEquals(
             len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4)
Example #3
 def testReuseVariables(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         ops.batch_norm(images, scale=True, scope='bn')
         ops.batch_norm(images, scale=True, scope='bn', reuse=True)
         beta = variables.get_variables_by_name('beta')
         gamma = variables.get_variables_by_name('gamma')
         self.assertEquals(len(beta), 1)
         self.assertEquals(len(gamma), 1)
         moving_vars = tf.get_collection('moving_vars')
         self.assertEquals(len(moving_vars), 2)
Example #4
 def testUpdateOps(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         ops.batch_norm(images)
         update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
         update_moving_mean = update_ops[0]
         update_moving_variance = update_ops[1]
         self.assertEquals(update_moving_mean.op.name,
                           'BatchNorm/AssignMovingAvg')
         self.assertEquals(update_moving_variance.op.name,
                           'BatchNorm/AssignMovingAvg_1')
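The two ops collected per batch_norm call are the moving-average assignments for the mean and the variance. In a training loop they are typically attached as control dependencies of the train op so that every optimization step also refreshes the moving statistics. A minimal sketch, assuming a `total_loss` tensor defined elsewhere:

 # Sketch: run the batch-norm update ops alongside each training step.
 # `total_loss` is a hypothetical loss tensor defined elsewhere.
 update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
 with tf.control_dependencies(update_ops):
     train_op = tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)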
Example #5
 def testCreateVariablesWithoutCenterWithoutScale(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         ops.batch_norm(images, center=False, scale=False)
         beta = variables.get_variables_by_name('beta')
         self.assertEquals(beta, [])
         gamma = variables.get_variables_by_name('gamma')
         self.assertEquals(gamma, [])
         moving_mean = tf.moving_average_variables()[0]
         moving_variance = tf.moving_average_variables()[1]
         self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
         self.assertEquals(moving_variance.op.name,
                           'BatchNorm/moving_variance')
Example #6
 def testComputeMovingVars(self):
     height, width = 3, 3
     with self.test_session() as sess:
         image_shape = (10, height, width, 3)
         image_values = np.random.rand(*image_shape)
         expected_mean = np.mean(image_values, axis=(0, 1, 2))
         expected_var = np.var(image_values, axis=(0, 1, 2))
         images = tf.constant(image_values,
                              shape=image_shape,
                              dtype=tf.float32)
         output = ops.batch_norm(images, decay=0.1)
         update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
         with tf.control_dependencies(update_ops):
             output = tf.identity(output)
         # Initialize all variables
         sess.run(tf.global_variables_initializer())
         moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
         moving_variance = variables.get_variables(
             'BatchNorm/moving_variance')[0]
         mean, variance = sess.run([moving_mean, moving_variance])
         # After initialization moving_mean == 0 and moving_variance == 1.
         self.assertAllClose(mean, [0] * 3)
         self.assertAllClose(variance, [1] * 3)
         for _ in range(10):
             sess.run([output])
         mean = moving_mean.eval()
         variance = moving_variance.eval()
         # After 10 updates with decay 0.1, moving_mean and moving_variance
         # have converged to expected_mean and expected_var (up to ~1e-10).
         self.assertAllClose(mean, expected_mean)
         self.assertAllClose(variance, expected_var)
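Why ten updates suffice here: assign_moving_average applies the standard recurrence moving = decay * moving + (1 - decay) * batch_stat, so with decay=0.1 only 0.1**10 ≈ 1e-10 of the zero/one initialization survives, well inside assertAllClose's tolerance. A plain-Python sketch of the arithmetic:

 # Plain-Python sketch of the moving-average recurrence (values illustrative).
 decay = 0.1
 moving = 0.0          # moving_mean starts at 0
 batch_stat = 0.5      # a constant per-batch mean
 for _ in range(10):
     moving = decay * moving + (1 - decay) * batch_stat
 print(abs(moving - batch_stat))  # ~5e-11: effectively converged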
Example #7
 def testCreateOp(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.batch_norm(images)
         self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm'))
         self.assertListEqual(output.get_shape().as_list(),
                              [5, height, width, 3])
Example #8
def conv2d_transpose(inputs,
                     num_filters_out,
                     kernel_size,
                     stride=1,
                     padding='SAME',
                     activation=tf.nn.relu,
                     stddev=0.01,
                     bias=0.0,
                     weight_decay=0,
                     batch_norm_params=None,
                     is_training=True,
                     trainable=True,
                     restore=True,
                     scope=None,
                     reuse=None):
  """Adds a transposed 2-D convolution ("deconvolution") layer.

  The output upsamples height and width by `stride`, then applies an
  optional batch_norm (or a bias) and an optional activation.
  """
  with tf.variable_op_scope([inputs], scope, 'Conv_Transpose', reuse=reuse):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    num_filters_in = inputs.get_shape()[-1]
    weights_shape = [kernel_h, kernel_w,
                     num_filters_out, num_filters_in]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    h = inputs.get_shape()[1].value
    w = inputs.get_shape()[2].value
    output_shape = tf.concat(0, [tf.shape(inputs)[0:1],
                                 [h * stride_h, w * stride_w, num_filters_out]])
    conv = tf.nn.conv2d_transpose(inputs, weights,
                                  strides=[1, stride_h, stride_w, 1],
                                  output_shape=output_shape, padding=padding)
    conv.set_shape((None, h * stride_h, w * stride_w, num_filters_out))
    if batch_norm_params is not None:
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(conv, **batch_norm_params)
    else:
      bias_shape = [num_filters_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.bias_add(conv, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
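A hypothetical call, to make the shape behavior concrete (input shape and argument values are illustrative only): with stride=2, height and width are doubled and the channel count becomes num_filters_out.

 # Hypothetical usage of conv2d_transpose; shapes are illustrative.
 images = tf.random_uniform((8, 16, 16, 64), seed=1)
 up = conv2d_transpose(images, num_filters_out=32, kernel_size=3, stride=2,
                       scope='deconv1')
 print(up.get_shape().as_list())  # [None, 32, 32, 32]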
Example #9
 def testCreateMovingVars(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         _ = ops.batch_norm(images, moving_vars='moving_vars')
         moving_mean = tf.get_collection('moving_vars',
                                         'BatchNorm/moving_mean')
         self.assertEquals(len(moving_mean), 1)
         self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean')
         moving_variance = tf.get_collection('moving_vars',
                                             'BatchNorm/moving_variance')
         self.assertEquals(len(moving_variance), 1)
         self.assertEquals(moving_variance[0].op.name,
                           'BatchNorm/moving_variance')
Example #10
 def testReuseVars(self):
     height, width = 3, 3
     with self.test_session() as sess:
         image_shape = (10, height, width, 3)
         image_values = np.random.rand(*image_shape)
         expected_mean = np.mean(image_values, axis=(0, 1, 2))
         expected_var = np.var(image_values, axis=(0, 1, 2))
         images = tf.constant(image_values,
                              shape=image_shape,
                              dtype=tf.float32)
         output = ops.batch_norm(images, decay=0.1, is_training=False)
         update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
         with tf.control_dependencies(update_ops):
             barrier = tf.no_op(name='gradient_barrier')
             output = control_flow_ops.with_dependencies([barrier], output)
         # Initialize all variables
         sess.run(tf.global_variables_initializer())
         moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
         moving_variance = variables.get_variables(
             'BatchNorm/moving_variance')[0]
         mean, variance = sess.run([moving_mean, moving_variance])
         # After initialization moving_mean == 0 and moving_variance == 1.
         self.assertAllClose(mean, [0] * 3)
         self.assertAllClose(variance, [1] * 3)
         # Simulate assignment from a saver restore.
         init_assigns = [
             tf.assign(moving_mean, expected_mean),
             tf.assign(moving_variance, expected_var)
         ]
         sess.run(init_assigns)
         for _ in range(10):
             sess.run([output], {images: np.random.rand(*image_shape)})
         mean = moving_mean.eval()
         variance = moving_variance.eval()
         # Although we feed different images, moving_mean and moving_variance
         # should not change, because is_training=False freezes them.
         self.assertAllClose(mean, expected_mean)
         self.assertAllClose(variance, expected_var)
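The statistics stay frozen because this batch_norm variant appears to register update ops only when is_training=True; at inference time the collection is simply left empty, so the control dependency above guards nothing. A quick sanity check in a fresh graph (assumed behavior, consistent with the assertions above):

 # Sanity check: with is_training=False no update ops should be registered,
 # leaving the moving statistics read-only (assumed TF-Slim behavior).
 images = tf.random_uniform((5, 3, 3, 3), seed=1)
 ops.batch_norm(images, is_training=False, scope='frozen_bn')
 print(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)))  # expected: 0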