  def testReuseUpdateOps(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images, scope='bn')
      self.assertEqual(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2)
      ops.batch_norm(images, scope='bn', reuse=True)
      self.assertEqual(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4)
  def testMovingAverageVariables(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images, scale=True)
      moving_mean = tf.moving_average_variables()[0]
      moving_variance = tf.moving_average_variables()[1]
      self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
      self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
  def testReuseVariables(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images, scale=True, scope='bn')
      ops.batch_norm(images, scale=True, scope='bn', reuse=True)
      beta = variables.get_variables_by_name('beta')
      gamma = variables.get_variables_by_name('gamma')
      self.assertEqual(len(beta), 1)
      self.assertEqual(len(gamma), 1)
      moving_vars = tf.get_collection('moving_vars')
      self.assertEqual(len(moving_vars), 2)
  def testUpdateOps(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images)
      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
      update_moving_mean = update_ops[0]
      update_moving_variance = update_ops[1]
      self.assertEqual(update_moving_mean.op.name,
                       'BatchNorm/AssignMovingAvg')
      self.assertEqual(update_moving_variance.op.name,
                       'BatchNorm/AssignMovingAvg_1')
  def testCreateVariablesWithoutCenterWithoutScale(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images, center=False, scale=False)
      beta = variables.get_variables_by_name('beta')
      self.assertEqual(beta, [])
      gamma = variables.get_variables_by_name('gamma')
      self.assertEqual(gamma, [])
      moving_mean = tf.moving_average_variables()[0]
      moving_variance = tf.moving_average_variables()[1]
      self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
      self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
  def testCreateVariables(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images, scale=True)
      beta = variables.get_variables_by_name('beta')[0]
      gamma = variables.get_variables_by_name('gamma')[0]
      self.assertEqual(beta.op.name, 'BatchNorm/beta')
      self.assertEqual(gamma.op.name, 'BatchNorm/gamma')
      moving_mean = tf.get_collection('moving_vars')[0]
      moving_variance = tf.get_collection('moving_vars')[1]
      self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
      self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
  def testCreateVariablesWithScale(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.batch_norm(images, scale=True)
      beta = variables.get_variables_by_name('beta')[0]
      gamma = variables.get_variables_by_name('gamma')[0]
      self.assertEqual(beta.op.name, 'BatchNorm/beta')
      self.assertEqual(gamma.op.name, 'BatchNorm/gamma')
      moving_mean = tf.moving_average_variables()[0]
      moving_variance = tf.moving_average_variables()[1]
      self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
      self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
  def testCreateOp(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.batch_norm(images)
      self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm'))
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
  def testReuseVars(self):
    height, width = 3, 3
    with self.test_session() as sess:
      image_shape = (10, height, width, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=(0, 1, 2))
      expected_var = np.var(image_values, axis=(0, 1, 2))
      images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
      output = ops.batch_norm(images, decay=0.1, is_training=False)
      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
      with tf.control_dependencies(update_ops):
        barrier = tf.no_op(name='gradient_barrier')
        output = control_flow_ops.with_dependencies([barrier], output)
      # Initialize all variables
      sess.run(tf.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 3)
      self.assertAllClose(variance, [1] * 3)
      # Simulate assignment from saver restore.
      init_assigns = [tf.assign(moving_mean, expected_mean),
                      tf.assign(moving_variance, expected_var)]
      sess.run(init_assigns)
      for _ in range(10):
        sess.run([output], {images: np.random.rand(*image_shape)})
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # Although we feed different images, the moving_mean and moving_variance
      # shouldn't change.
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_var)
  def testComputeMovingVars(self):
    height, width = 3, 3
    with self.test_session() as sess:
      image_shape = (10, height, width, 3)
      image_values = np.random.rand(*image_shape)
      expected_mean = np.mean(image_values, axis=(0, 1, 2))
      expected_var = np.var(image_values, axis=(0, 1, 2))
      images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
      output = ops.batch_norm(images, decay=0.1)
      update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
      with tf.control_dependencies(update_ops):
        barrier = tf.no_op(name='gradient_barrier')
        output = control_flow_ops.with_dependencies([barrier], output)
      # Initialize all variables
      sess.run(tf.global_variables_initializer())
      moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
      moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
      mean, variance = sess.run([moving_mean, moving_variance])
      # After initialization moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 3)
      self.assertAllClose(variance, [1] * 3)
      for _ in range(10):
        sess.run([output])
      mean = moving_mean.eval()
      variance = moving_variance.eval()
      # After 10 updates with decay 0.1 moving_mean == expected_mean and
      # moving_variance == expected_var.
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_var)
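Why ten updates suffice in testComputeMovingVars: assuming the standard exponential moving-average rule v = decay * v + (1 - decay) * x (the update that assign_moving_average applies), only a decay**10 == 1e-10 fraction of the initial value survives after ten steps with decay=0.1. A quick standalone sanity check; the ema helper below is illustrative and not part of the test suite:

def ema(value, init, decay, steps):
  # Repeatedly apply v = decay * v + (1 - decay) * value.
  v = init
  for _ in range(steps):
    v = decay * v + (1.0 - decay) * value
  return v

# moving_mean starts at 0; after 10 updates with decay=0.1 the initial
# value contributes only decay**10 == 1e-10, so the result ~= the batch mean.
batch_mean = 0.5  # stand-in for one channel's batch mean
assert abs(ema(batch_mean, init=0.0, decay=0.1, steps=10) - batch_mean) < 1e-6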
  def testCreateMovingVars(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      _ = ops.batch_norm(images, moving_vars='moving_vars')
      moving_mean = tf.get_collection('moving_vars',
                                      'BatchNorm/moving_mean')
      self.assertEqual(len(moving_mean), 1)
      self.assertEqual(moving_mean[0].op.name, 'BatchNorm/moving_mean')
      moving_variance = tf.get_collection('moving_vars',
                                          'BatchNorm/moving_variance')
      self.assertEqual(len(moving_variance), 1)
      self.assertEqual(moving_variance[0].op.name, 'BatchNorm/moving_variance')
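Because the moving statistics are also placed in the custom 'moving_vars' collection, that collection provides a ready-made var_list for checkpointing just the moving statistics. A minimal sketch, assuming a live session sess; the checkpoint path is illustrative:

moving_vars = tf.get_collection('moving_vars')
saver = tf.train.Saver(var_list=moving_vars)
saver.save(sess, '/tmp/moving_stats.ckpt')     # illustrative path
saver.restore(sess, '/tmp/moving_stats.ckpt')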
def bn_relu(inputs):
    return tf.nn.relu(ops.batch_norm(inputs))
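A minimal usage sketch for bn_relu: create the convolution without its own activation so batch norm sits between the convolution and the ReLU. The conv_bn_relu helper is hypothetical, and passing activation=None to ops.conv2d is an assumption modeled on the analogous ops.fc argument used in the network below:

def conv_bn_relu(inputs, num_filters, scope):
  # Hypothetical helper: linear conv, then batch norm + ReLU via bn_relu.
  net = ops.conv2d(inputs, num_filters, [3, 3], activation=None, scope=scope)
  return bn_relu(net)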
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1001,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for op_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.op_scope([inputs], scope, 'baxNet'):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 256 x 256 x 3
        end_points['conv0'] = ops.conv2d(inputs, 8, [5, 5], stride=1,
                                         scope='conv0', padding='SAME')
        
        end_points['batch_norm1'] = ops.batch_norm(end_points['conv0'], scope='batch_norm1')

        # 256 x 256 x 8
        end_points['conv1'] = ops.conv2d(end_points['batch_norm1'], 16, [3, 3],
                                         scope='conv1', padding='SAME')

        end_points['batch_norm2'] = ops.batch_norm(end_points['conv1'], scope='batch_norm2')

        # 256 x 256 x 16
        end_points['conv2'] = ops.conv2d(end_points['batch_norm2'], 16, [3, 3],
                                         scope='conv2', padding='SAME')
        
        end_points['batch_norm3'] = ops.batch_norm(end_points['conv2'], scope='batch_norm3')

        in_net = end_points['batch_norm3']
        curr_filters = 16
        base_layer_num = [32,16,8,4]
        for i in range(1, 5):
          for j in range(1, base_layer_num[i-1] + i):
            with tf.variable_scope('res%d_%d' % (i,j)):
              if (j < (base_layer_num[i-1] + i - 1)):
                curr_padding = 'SAME'
                curr_stride = 1
              else:
                curr_filters = 2*curr_filters
                curr_padding = 'SAME'
                curr_stride = 2

              conv1_1 = ops.conv2d(in_net, curr_filters, [3, 3],
                                   padding=curr_padding, stride=curr_stride,
                                   scope='conv1_1')
              batch_norm1_1 = ops.batch_norm(conv1_1, scope='batch_norm1_1')
              conv1_2 = ops.conv2d(batch_norm1_1, curr_filters, [3, 3],
                                   padding='SAME', scope='conv1_2')
              if (j < (base_layer_num[i-1] + i - 1)):
                combined = in_net + conv1_2
              else:
                combined = ops.conv2d(in_net, curr_filters, [1, 1],
                                      padding='SAME', stride=2,
                                      scope='combined')
                combined = combined + conv1_2
              batch_norm1_2 = ops.batch_norm(combined, scope='batch_norm1_2')
              in_net = batch_norm1_2
              end_points['res%d_%d' %(i,j)] = in_net

        for i in range(1, 3):
          with tf.variable_scope('res_final%d' % i):
            conv1_1 = ops.conv2d(in_net, curr_filters, [3, 3],
                                 padding='SAME', stride=2, scope='conv1_1')
            batch_norm1_1 = ops.batch_norm(conv1_1, scope='batch_norm1_1')
            conv1_2 = ops.conv2d(batch_norm1_1, curr_filters, [3, 3],
                                 padding='SAME', scope='conv1_2')
            combined = ops.conv2d(in_net, curr_filters, [1, 1],
                                  padding='SAME', stride=2, scope='combined')
            combined = combined + conv1_2
            batch_norm1_2 = ops.batch_norm(combined, scope='batch_norm1_2')
            in_net = batch_norm1_2
            end_points['res_final%d' % i] = in_net

        with tf.variable_scope('logits'):
          shape = in_net.get_shape()
          if shape[1] > 1:
            in_net = ops.avg_pool(in_net, shape[1:3], padding='VALID',
                                  scope='avg_pool')
          in_net = ops.flatten(in_net, scope='flatten')
          logits = ops.fc(in_net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
          
      return logits, end_points
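A hedged end-to-end sketch of driving the network above, reusing the UPDATE_OPS_COLLECTION barrier pattern from testComputeMovingVars. It assumes the same tf, np, ops, and control_flow_ops imports as the tests; the input size and random feed are illustrative:

images = tf.placeholder(tf.float32, [8, 256, 256, 3])
logits, end_points = inception_v3(images, num_classes=1001, is_training=True)
# Group the moving-average updates behind the forward pass so they run
# whenever logits is evaluated.
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
  barrier = tf.no_op(name='gradient_barrier')
  logits = control_flow_ops.with_dependencies([barrier], logits)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out = sess.run(logits, {images: np.random.rand(8, 256, 256, 3)})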