Example #1
 def testGetVariablesToRestore(self):
     with self.test_session():
         with tf.variable_scope('A'):
             a = variables.variable('a', [5])
         with tf.variable_scope('B'):
             b = variables.variable('a', [5])
         self.assertEquals([a, b], variables.get_variables_to_restore())
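A minimal sketch of how the restore list above is typically consumed, assuming the same TF 1.x-era slim `variables` module; the session and checkpoint path below are hypothetical:

def restore_slim_variables(sess, checkpoint_path):
    # Only variables created with restore=True appear in the restore collection.
    saver = tf.train.Saver(variables.get_variables_to_restore())
    saver.restore(sess, checkpoint_path)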
Example #2
    def testVariableWithVariableDeviceChooser(self):

        with tf.Graph().as_default():
            device_fn = variables.VariableDeviceChooser(
                num_parameter_servers=2)
            with scopes.arg_scope([variables.variable], device=device_fn):
                a = variables.variable('a', [])
                b = variables.variable('b', [])
                c = variables.variable('c', [], device='cpu:12')
                d = variables.variable('d', [])
                with tf.device('cpu:99'):
                    e_init = tf.constant(12)
                e = variables.variable('e', initializer=e_init)
            # The values below highlight how the VariableDeviceChooser puts initial
            # values on the same device as the variable job.
            self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
            self.assertDeviceEqual(a.initial_value.device, a.device)
            self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
            self.assertDeviceEqual(b.initial_value.device, b.device)
            self.assertDeviceEqual(c.device, '/cpu:12')
            self.assertDeviceEqual(c.initial_value.device, c.device)
            self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
            self.assertDeviceEqual(d.initial_value.device, d.device)
            self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
            self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
Example #3
 def testGetVariablesToRestore(self):
   with self.test_session():
     with tf.variable_scope('A'):
       a = variables.variable('a', [5])
     with tf.variable_scope('B'):
       b = variables.variable('a', [5])
     self.assertEquals([a, b], variables.get_variables_to_restore())
Example #4
 def testVariableWithDeviceFromScope(self):
   with self.test_session():
     with tf.device('/cpu:0'):
       a = variables.variable('a', [])
       b = variables.variable('b', [], device='cpu:1')
     self.assertDeviceEqual(a.device, 'cpu:0')
     self.assertDeviceEqual(b.device, 'cpu:1')
Example #5
    def testVariableWithDeviceFunction(self):
        class DevFn(object):
            def __init__(self):
                self.counter = -1

            def __call__(self, op):
                self.counter += 1
                return 'cpu:%d' % self.counter

        with self.test_session():
            with scopes.arg_scope([variables.variable], device=DevFn()):
                a = variables.variable('a', [])
                b = variables.variable('b', [])
                c = variables.variable('c', [], device='cpu:12')
                d = variables.variable('d', [])
                with tf.device('cpu:99'):
                    e_init = tf.constant(12)
                e = variables.variable('e', initializer=e_init)
            self.assertDeviceEqual(a.device, 'cpu:0')
            self.assertDeviceEqual(a.initial_value.device, 'cpu:0')
            self.assertDeviceEqual(b.device, 'cpu:1')
            self.assertDeviceEqual(b.initial_value.device, 'cpu:1')
            self.assertDeviceEqual(c.device, 'cpu:12')
            self.assertDeviceEqual(c.initial_value.device, 'cpu:12')
            self.assertDeviceEqual(d.device, 'cpu:2')
            self.assertDeviceEqual(d.initial_value.device, 'cpu:2')
            self.assertDeviceEqual(e.device, 'cpu:3')
            self.assertDeviceEqual(e.initial_value.device, 'cpu:99')
Example #6
 def testGetVariableGivenNameScoped(self):
     with self.test_session():
         with tf.variable_scope('A'):
             a = variables.variable('a', [5])
             b = variables.variable('b', [5])
             self.assertEquals([a], variables.get_variables_by_name('a'))
             self.assertEquals([b], variables.get_variables_by_name('b'))
Example #7
 def testVariableWithDeviceFromScope(self):
     with self.test_session():
         with tf.device("/cpu:0"):
             a = variables.variable("a", [])
             b = variables.variable("b", [], device="cpu:1")
         self.assertDeviceEqual(a.device, "cpu:0")
         self.assertDeviceEqual(b.device, "cpu:1")
Example #8
 def testGetVariableGivenNameScoped(self):
   with self.test_session():
     with tf.variable_scope('A'):
       a = variables.variable('a', [5])
       b = variables.variable('b', [5])
       self.assertEquals([a], variables.get_variables_by_name('a'))
       self.assertEquals([b], variables.get_variables_by_name('b'))
Example #9
    def testVariableWithDeviceFunction(self):
        class DevFn(object):
            def __init__(self):
                self.counter = -1

            def __call__(self, op):
                self.counter += 1
                return "cpu:%d" % self.counter

        with self.test_session():
            with scopes.arg_scope([variables.variable], device=DevFn()):
                a = variables.variable("a", [])
                b = variables.variable("b", [])
                c = variables.variable("c", [], device="cpu:12")
                d = variables.variable("d", [])
                with tf.device("cpu:99"):
                    e_init = tf.constant(12)
                e = variables.variable("e", initializer=e_init)
            self.assertDeviceEqual(a.device, "cpu:0")
            self.assertDeviceEqual(a.initial_value.device, "cpu:0")
            self.assertDeviceEqual(b.device, "cpu:1")
            self.assertDeviceEqual(b.initial_value.device, "cpu:1")
            self.assertDeviceEqual(c.device, "cpu:12")
            self.assertDeviceEqual(c.initial_value.device, "cpu:12")
            self.assertDeviceEqual(d.device, "cpu:2")
            self.assertDeviceEqual(d.initial_value.device, "cpu:2")
            self.assertDeviceEqual(e.device, "cpu:3")
            self.assertDeviceEqual(e.initial_value.device, "cpu:99")
Example #10
 def testVariableWithDeviceFromScope(self):
     with self.test_session():
         with tf.device('/cpu:0'):
             a = variables.variable('a', [])
             b = variables.variable('b', [], device='cpu:1')
         self.assertDeviceEqual(a.device, 'cpu:0')
         self.assertDeviceEqual(b.device, 'cpu:1')
Example #11
 def testReuseVariable(self):
   with self.test_session():
     with tf.variable_scope('A'):
       a = variables.variable('a', [])
     with tf.variable_scope('A', reuse=True):
       b = variables.variable('a', [])
     self.assertEquals(a, b)
     self.assertListEqual([a], variables.get_variables())
Example #12
 def testReuseVariable(self):
     with self.test_session():
         with tf.variable_scope('A'):
             a = variables.variable('a', [])
         with tf.variable_scope('A', reuse=True):
             b = variables.variable('a', [])
         self.assertEquals(a, b)
         self.assertListEqual([a], variables.get_variables())
Example #13
 def testNoneGetVariablesToRestore(self):
     with self.test_session():
         with tf.variable_scope("A"):
             a = variables.variable("a", [5], restore=False)
         with tf.variable_scope("B"):
             b = variables.variable("a", [5], restore=False)
         self.assertEquals([], variables.get_variables_to_restore())
         self.assertEquals([a, b], variables.get_variables())
Example #14
 def testGetVariablesSuffix(self):
     with self.test_session():
         with tf.variable_scope("A"):
             a = variables.variable("a", [5])
         with tf.variable_scope("A"):
             b = variables.variable("b", [5])
         self.assertEquals([a], variables.get_variables(suffix="a"))
         self.assertEquals([b], variables.get_variables(suffix="b"))
Example #15
 def testVariableCollectionsWithArgScopeNested(self):
     with self.test_session():
         with scopes.arg_scope([variables.variable], collections="A"):
             a = variables.variable("a", [])
             with scopes.arg_scope([variables.variable], collections="B"):
                 b = variables.variable("b", [])
         self.assertEquals(a, tf.get_collection("A")[0])
         self.assertEquals(b, tf.get_collection("B")[0])
Example #16
 def testGetVariableWithDistractors(self):
     with self.test_session():
         with tf.variable_scope('parent'):
             a = variables.variable('child', [5])
             with tf.variable_scope('child'):
                 variables.variable('grandchild1', [7])
                 variables.variable('grandchild2', [9])
         self.assertEquals(a, variables.get_unique_variable('parent/child'))
Example #17
 def testGetThrowsExceptionWithChildrenButNoMatch(self):
     var_name = 'parent/child'
     with self.test_session():
         with tf.variable_scope(var_name):
             variables.variable('grandchild1', [7])
             variables.variable('grandchild2', [9])
         with self.assertRaises(ValueError):
             variables.get_unique_variable(var_name)
Example #18
 def testVariableCollectionsWithArgScopeNested(self):
   with self.test_session():
     with scopes.arg_scope([variables.variable], collections='A'):
       a = variables.variable('a', [])
       with scopes.arg_scope([variables.variable], collections='B'):
         b = variables.variable('b', [])
     self.assertEquals(a, tf.get_collection('A')[0])
     self.assertEquals(b, tf.get_collection('B')[0])
Example #19
 def testGetVariableWithDistractors(self):
     with self.test_session():
         with tf.variable_scope("parent"):
             a = variables.variable("child", [5])
             with tf.variable_scope("child"):
                 variables.variable("grandchild1", [7])
                 variables.variable("grandchild2", [9])
         self.assertEquals(a, variables.get_unique_variable("parent/child"))
Example #20
 def testGetVariablesSuffix(self):
     with self.test_session():
         with tf.variable_scope('A'):
             a = variables.variable('a', [5])
         with tf.variable_scope('A'):
             b = variables.variable('b', [5])
         self.assertEquals([a], variables.get_variables(suffix='a'))
         self.assertEquals([b], variables.get_variables(suffix='b'))
Example #21
 def testGetVariableGivenName(self):
   with self.test_session():
     with tf.variable_scope('A'):
       a = variables.variable('a', [5])
     with tf.variable_scope('B'):
       b = variables.variable('a', [5])
     self.assertEquals('a', variables.get_variable_given_name(a))
     self.assertEquals('a', variables.get_variable_given_name(b))
Example #22
 def testNoneGetVariablesToRestore(self):
     with self.test_session():
         with tf.variable_scope('A'):
             a = variables.variable('a', [5], restore=False)
         with tf.variable_scope('B'):
             b = variables.variable('a', [5], restore=False)
         self.assertEqual([], variables.get_variables_to_restore())
         self.assertEqual([a, b], variables.get_variables())
Example #23
 def testGetVariablesToRestore(self):
   with self.test_session():
     with tf.variable_scope('A'):
       a = variables.variable('a', [5])
     with tf.variable_scope('B'):
       b = variables.variable('b', [5])
     self.assertListEqual([a, b],
                          tf.get_collection(variables.VARIABLES_TO_RESTORE))
Example #24
 def testGetVariableWithDistractors(self):
   with self.test_session():
     with tf.variable_scope('parent'):
       a = variables.variable('child', [5])
       with tf.variable_scope('child'):
         variables.variable('grandchild1', [7])
         variables.variable('grandchild2', [9])
     self.assertEquals(a, variables.get_unique_variable('parent/child'))
Example #25
 def testGetThrowsExceptionWithChildrenButNoMatch(self):
   var_name = 'parent/child'
   with self.test_session():
     with tf.variable_scope(var_name):
       variables.variable('grandchild1', [7])
       variables.variable('grandchild2', [9])
     with self.assertRaises(ValueError):
       variables.get_unique_variable(var_name)
Example #26
 def testGetVariablesSuffix(self):
   with self.test_session():
     with tf.variable_scope('A'):
       a = variables.variable('a', [5])
     with tf.variable_scope('A'):
       b = variables.variable('b', [5])
     self.assertEquals([a], variables.get_variables(suffix='a'))
     self.assertEquals([b], variables.get_variables(suffix='b'))
Example #27
 def testVariableCollectionsWithArgScopeNested(self):
     with self.test_session():
         with scopes.arg_scope([variables.variable], collections='A'):
             a = variables.variable('a', [])
             with scopes.arg_scope([variables.variable], collections='B'):
                 b = variables.variable('b', [])
         self.assertEquals(a, tf.get_collection('A')[0])
         self.assertEquals(b, tf.get_collection('B')[0])
Example #28
 def testGetVariables(self):
     with self.test_session():
         with tf.variable_scope('A'):
             a = variables.variable('a', [5])
         with tf.variable_scope('B'):
             b = variables.variable('a', [5])
         self.assertEqual([a, b], variables.get_variables())
         self.assertEqual([a], variables.get_variables('A'))
         self.assertEqual([b], variables.get_variables('B'))
Example #29
 def testVariableCollectionsWithArgScopeNonNested(self):
     with self.test_session():
         with scopes.arg_scope([variables.variable], collections="A"):
             a = variables.variable("a", [])
         with scopes.arg_scope([variables.variable], collections="B"):
             b = variables.variable("b", [])
         variables.variable("c", [])
         self.assertListEqual([a], tf.get_collection("A"))
         self.assertListEqual([b], tf.get_collection("B"))
Example #30
 def testVariableCollectionsWithArgScopeNonNested(self):
   with self.test_session():
     with scopes.arg_scope([variables.variable], collections='A'):
       a = variables.variable('a', [])
     with scopes.arg_scope([variables.variable], collections='B'):
       b = variables.variable('b', [])
     variables.variable('c', [])
     self.assertListEqual([a], tf.get_collection('A'))
     self.assertListEqual([b], tf.get_collection('B'))
Example #31
 def testVariableCollectionsWithArgScopeNonNested(self):
     with self.test_session():
         with scopes.arg_scope([variables.variable], collections='A'):
             a = variables.variable('a', [])
         with scopes.arg_scope([variables.variable], collections='B'):
             b = variables.variable('b', [])
         variables.variable('c', [])
         self.assertListEqual([a], tf.get_collection('A'))
         self.assertListEqual([b], tf.get_collection('B'))
Example #32
 def testGetMixedVariablesToRestore(self):
   with self.test_session():
     with tf.variable_scope('A'):
       a = variables.variable('a', [5])
       b = variables.variable('b', [5], restore=False)
     with tf.variable_scope('B'):
       c = variables.variable('c', [5])
       d = variables.variable('d', [5], restore=False)
     self.assertEquals([a, b, c, d], variables.get_variables())
     self.assertEquals([a, c], variables.get_variables_to_restore())
Example #33
 def testGetMixedVariablesToRestore(self):
     with self.test_session():
         with tf.variable_scope("A"):
             a = variables.variable("a", [5])
             b = variables.variable("b", [5], restore=False)
         with tf.variable_scope("B"):
             c = variables.variable("c", [5])
             d = variables.variable("d", [5], restore=False)
         self.assertEquals([a, b, c, d], variables.get_variables())
         self.assertEquals([a, c], variables.get_variables_to_restore())
Example #34
 def testGetMixedVariablesToRestore(self):
     with self.test_session():
         with tf.variable_scope('A'):
             a = variables.variable('a', [5])
             b = variables.variable('b', [5], restore=False)
         with tf.variable_scope('B'):
             c = variables.variable('c', [5])
             d = variables.variable('d', [5], restore=False)
         self.assertEquals([a, b, c, d], variables.get_variables())
         self.assertEquals([a, c], variables.get_variables_to_restore())
Example #35
 def testVariableRestoreWithArgScopeNested(self):
     with self.test_session():
         with scopes.arg_scope([variables.variable], restore=True):
             a = variables.variable("a", [])
             with scopes.arg_scope([variables.variable], trainable=False, collections=["A", "B"]):
                 b = variables.variable("b", [])
             c = variables.variable("c", [])
         self.assertListEqual([a, b, c], variables.get_variables_to_restore())
         self.assertListEqual([a, c], tf.trainable_variables())
         self.assertListEqual([b], tf.get_collection("A"))
         self.assertListEqual([b], tf.get_collection("B"))
Example #36
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None):

    with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
        kernel_h, kernel_w = _two_element_tuple(kernel_size)
        stride_h, stride_w = _two_element_tuple(stride)
        num_filters_in = inputs.get_shape()[-1]
        weights_shape = [kernel_h, kernel_w, num_filters_in, num_filters_out]
        weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
        l2_regularizer = None
        if weight_decay and weight_decay > 0:
            l2_regularizer = losses.l2_regularizer(weight_decay)
        weights = variables.variable('weights',
                                     shape=weights_shape,
                                     initializer=weights_initializer,
                                     regularizer=l2_regularizer,
                                     trainable=trainable,
                                     restore=restore)
        conv = tf.nn.conv2d(inputs,
                            weights, [1, stride_h, stride_w, 1],
                            padding=padding)
        if batch_norm_params is not None:
            with scopes.arg_scope([batch_norm],
                                  is_training=is_training,
                                  trainable=trainable,
                                  restore=restore):
                outputs = batch_norm(conv, **batch_norm_params)
        else:
            bias_shape = [
                num_filters_out,
            ]
            bias_initializer = tf.constant_initializer(bias)
            biases = variables.variable('biases',
                                        shape=bias_shape,
                                        initializer=bias_initializer,
                                        trainable=trainable,
                                        restore=restore)
            outputs = tf.nn.bias_add(conv, biases)
        if activation:
            outputs = activation(outputs)
        return outputs
Example #37
 def testVariableRestoreWithArgScopeNested(self):
   with self.test_session():
     with scopes.arg_scope([variables.variable], restore=True):
       a = variables.variable('a', [])
       with scopes.arg_scope([variables.variable], trainable=False,
                             collections=['A', 'B']):
         b = variables.variable('b', [])
       c = variables.variable('c', [])
     self.assertListEqual([a, b, c], variables.get_variables_to_restore())
     self.assertListEqual([a, c], tf.trainable_variables())
     self.assertListEqual([b], tf.get_collection('A'))
     self.assertListEqual([b], tf.get_collection('B'))
Example #38
 def testVariableRestoreWithArgScopeNested(self):
   with self.test_session():
     with scopes.arg_scope([variables.variable], restore=True):
       a = variables.variable('a', [])
       with scopes.arg_scope([variables.variable], trainable=False,
                             collections=['A', 'B']):
         b = variables.variable('b', [])
       c = variables.variable('c', [])
     self.assertListEqual([a, b, c],
                          tf.get_collection(variables.VARIABLES_TO_RESTORE))
     self.assertListEqual([a, c], tf.trainable_variables())
     self.assertListEqual([b], tf.get_collection('A'))
     self.assertListEqual([b], tf.get_collection('B'))
Example #39
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None,
       reuse=None):

    with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse):
        num_units_in = inputs.get_shape()[1]
        weights_shape = [num_units_in, num_units_out]
        weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
        l2_regularizer = None
        if weight_decay and weight_decay > 0:
            l2_regularizer = losses.l2_regularizer(weight_decay)
        weights = variables.variable('weights',
                                     shape=weights_shape,
                                     initializer=weights_initializer,
                                     regularizer=l2_regularizer,
                                     trainable=trainable,
                                     restore=restore)
        if batch_norm_params is not None:
            outputs = tf.matmul(inputs, weights)
            with scopes.arg_scope([batch_norm],
                                  is_training=is_training,
                                  trainable=trainable,
                                  restore=restore):
                outputs = batch_norm(outputs, **batch_norm_params)
        else:
            bias_shape = [
                num_units_out,
            ]
            bias_initializer = tf.constant_initializer(bias)
            biases = variables.variable('biases',
                                        shape=bias_shape,
                                        initializer=bias_initializer,
                                        trainable=trainable,
                                        restore=restore)
            outputs = tf.nn.xw_plus_b(inputs, weights, biases)
        if activation:
            outputs = activation(outputs)
        return outputs
Example #40
  def testGetVariablesByNameReturnsByValueWithoutScope(self):
    with self.test_session():
      a = variables.variable('a', [5])
      matched_variables = variables.get_variables_by_name('a')

      # If variables.get_variables_by_name returns the list by reference, the
      # following append should persist, and be returned, in subsequent calls
      # to variables.get_variables_by_name('a').
      matched_variables.append(4)

      matched_variables = variables.get_variables_by_name('a')
      self.assertEquals([a], matched_variables)
Example #41
    def testGetVariablesByNameReturnsByValueWithoutScope(self):
        with self.test_session():
            a = variables.variable('a', [5])
            matched_variables = variables.get_variables_by_name('a')

            # If variables.get_variables_by_name returns the list by reference, the
            # following append should persist, and be returned, in subsequent calls
            # to variables.get_variables_by_name('a').
            matched_variables.append(4)

            matched_variables = variables.get_variables_by_name('a')
            self.assertEquals([a], matched_variables)
Example #42
 def testVariableWithReplicaDeviceSetter(self):
     with self.test_session():
         with tf.device(tf.train.replica_device_setter(ps_tasks=2)):
             a = variables.variable("a", [])
             b = variables.variable("b", [])
             c = variables.variable("c", [], device="cpu:12")
             d = variables.variable("d", [])
             with tf.device("cpu:99"):
                 e_init = tf.constant(12)
             e = variables.variable("e", initializer=e_init)
         # The values below highlight how the replica_device_setter puts initial
         # values on the worker job, and how it merges explicit devices.
         self.assertDeviceEqual(a.device, "/job:ps/task:0/cpu:0")
         self.assertDeviceEqual(a.initial_value.device, "/job:worker/cpu:0")
         self.assertDeviceEqual(b.device, "/job:ps/task:1/cpu:0")
         self.assertDeviceEqual(b.initial_value.device, "/job:worker/cpu:0")
         self.assertDeviceEqual(c.device, "/job:ps/task:0/cpu:12")
         self.assertDeviceEqual(c.initial_value.device, "/job:worker/cpu:12")
         self.assertDeviceEqual(d.device, "/job:ps/task:1/cpu:0")
         self.assertDeviceEqual(d.initial_value.device, "/job:worker/cpu:0")
         self.assertDeviceEqual(e.device, "/job:ps/task:0/cpu:0")
         self.assertDeviceEqual(e.initial_value.device, "/job:worker/cpu:99")
Example #43
 def testVariableWithReplicaDeviceSetter(self):
   with self.test_session():
     with tf.device(tf.train.replica_device_setter(ps_tasks=2)):
       a = variables.variable('a', [])
       b = variables.variable('b', [])
       c = variables.variable('c', [], device='cpu:12')
       d = variables.variable('d', [])
       with tf.device('cpu:99'):
         e_init = tf.constant(12)
       e = variables.variable('e', initializer=e_init)
     # The values below highlight how the replica_device_setter puts initial
     # values on the worker job, and how it merges explicit devices.
     self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
     self.assertDeviceEqual(a.initial_value.device, '/job:worker/cpu:0')
     self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
     self.assertDeviceEqual(b.initial_value.device, '/job:worker/cpu:0')
     self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')
     self.assertDeviceEqual(c.initial_value.device, '/job:worker/cpu:12')
     self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')
     self.assertDeviceEqual(d.initial_value.device, '/job:worker/cpu:0')
     self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')
     self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')
Example #44
    def testVariableGPUPlacement(self):

        with tf.Graph().as_default():
            device_fn = variables.VariableDeviceChooser(placement="gpu:0")
            with scopes.arg_scope([variables.variable], device=device_fn):
                a = variables.variable("a", [])
                b = variables.variable("b", [])
                c = variables.variable("c", [], device="cpu:12")
                d = variables.variable("d", [])
                with tf.device("cpu:99"):
                    e_init = tf.constant(12)
                e = variables.variable("e", initializer=e_init)
            # The values below highlight how the VariableDeviceChooser puts initial
            # values on the same device as the variable job.
            self.assertDeviceEqual(a.device, "/gpu:0")
            self.assertDeviceEqual(a.initial_value.device, a.device)
            self.assertDeviceEqual(b.device, "/gpu:0")
            self.assertDeviceEqual(b.initial_value.device, b.device)
            self.assertDeviceEqual(c.device, "/cpu:12")
            self.assertDeviceEqual(c.initial_value.device, c.device)
            self.assertDeviceEqual(d.device, "/gpu:0")
            self.assertDeviceEqual(d.initial_value.device, d.device)
            self.assertDeviceEqual(e.device, "/gpu:0")
            self.assertDeviceEqual(e.initial_value.device, "/cpu:99")
Example #45
 def testCreateVariable(self):
   with self.test_session():
     with tf.variable_scope('A'):
       a = variables.variable('a', [5])
       self.assertEquals(a.op.name, 'A/a')
       self.assertListEqual(a.get_shape().as_list(), [5])
Example #46
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.

  conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the convolution
  operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same.  Note that presently
      both strides must have the same value.
    padding: one of 'VALID' or 'SAME'.
    activation: activation function.
    stddev: standard deviation of the truncated Gaussian weight distribution.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If it is None, batch_norm is not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
  Returns:
    a tensor representing the output of the operation.

  """
  with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    num_filters_in = inputs.get_shape()[-1]
    weights_shape = [kernel_h, kernel_w,
                     num_filters_in, num_filters_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
                        padding=padding)
    if batch_norm_params is not None:
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(conv, **batch_norm_params)
    else:
      bias_shape = [num_filters_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.bias_add(conv, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
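A hedged usage sketch for the conv2d wrapper above (TF 1.x graph mode); the placeholder input, layer sizes and scope names are assumptions, and the slim `variables`, `scopes` and `losses` modules it depends on must already be imported:

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = conv2d(images, 64, [3, 3], scope='conv1')           # -> [None, 224, 224, 64]
net = conv2d(net, 128, [3, 3], stride=2, scope='conv2')   # -> [None, 112, 112, 128]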
Example #47
 def testGetVariableWithSingleVar(self):
     with self.test_session():
         with tf.variable_scope('parent'):
             a = variables.variable('child', [5])
         self.assertEquals(a, variables.get_unique_variable('parent/child'))
Example #48
    def forward(self):
        pad = [[self.lay.pad, self.lay.pad]] * 2
        temp1 = tf.pad(self.inp.out, [[0, 0]] + pad + [[0, 0]])
        # temp = tf.nn.conv2d(temp1, self.lay.w['kernel'], padding = 'VALID',
        #    name = self.scope, strides = [1] + [self.lay.stride] * 2 + [1])

        kernel_h = int(self.lay.wshape['kernel'][0])
        # print("kernel size: ", self.lay.w['kernel'].get_shape())
        # print("kernel_h: ", kernel_h)

        kernel_w = int(self.lay.wshape['kernel'][1])
        # print("kernel_w: ", kernel_w)
        print("kernel size2: ", self.lay.wshape['kernel'])
        stride_h = self.lay.stride
        stride_w = self.lay.stride
        num_filters_in = int(temp1.get_shape()[-1])
        num_filters_out = int(self.lay.wshape['kernel'][-1])

        # print("num_filters_out: ", num_filters_out)

        def block_indx(k, rc, cc):
            rc = int((rc + k - 1) // k) * k
            cc = int((cc + k - 1) // k) * k
            i = np.arange(0, k, 1).reshape([1, k])
            j = np.arange(0, -k, -1).reshape([k, 1])
            indx = (i + j).T
            indx = (indx + k) % k
            m = np.tile(indx, [int(rc / k), int(cc / k)])
            offset = np.arange(0, rc * cc)
            i = (offset / cc) // k
            j = (offset % cc) // k
            offset = (i * cc + j * k).reshape([rc, cc])
            return m + offset

        #if np.min([num_filters_out, num_filters_in]) == 16:
        #    partition_size = 16
        #else:
        partition_size = 16

        if partition_size and partition_size <= np.min(
            [num_filters_out, num_filters_in]):
            k = partition_size
            indx = block_indx(k, num_filters_out, num_filters_in)
            # print(indx)
            target_c = num_filters_in * num_filters_out // k
            print(
                "Leo: congratulations!!!!!!!!!!!!!!!!!! you are using BlockCircConv2D",
                partition_size)
        else:
            print("Leo: sorry, not enough size for partitoning",
                  num_filters_out, num_filters_in, kernel_h, kernel_w)
            target_c = np.max([num_filters_in, num_filters_out])
            a, b = np.ogrid[0:target_c, 0:-target_c:-1]
            indx = a + b

        print('target_c:{}'.format(target_c))
        indx = (indx + target_c) % target_c
        # np.set_printoptions(threshold=np.inf)
        print(indx[:num_filters_out, :num_filters_in].astype(np.int32))
        indx = tf.constant(indx[:num_filters_out, :num_filters_in].astype(
            np.int32))

        with tf.variable_scope(self.scope,
                               'conv', [temp1],
                               reuse=tf.AUTO_REUSE):
            weights_shape = [target_c, kernel_h * kernel_w]
            n = kernel_h * kernel_w * num_filters_out
            weights_initializer = tf.truncated_normal_initializer(
                stddev=np.sqrt(2.0 / int(n)))  # stddev)

            # l2_regularizer = losses.l2_regularizer(0.0005)

            weights = vars.variable('weights',
                                    shape=weights_shape,
                                    initializer=weights_initializer,
                                    regularizer=None,
                                    trainable=True,
                                    restore=True)

            self.lay.w["weights"] = weights

            weights = tf.reshape(
                tf.transpose(tf.gather(weights, indx), [2, 1, 0]), [
                    int(kernel_h),
                    int(kernel_w),
                    int(num_filters_in),
                    int(num_filters_out)
                ])

            conv = tf.nn.conv2d(temp1,
                                weights, [1, stride_h, stride_w, 1],
                                name=self.scope,
                                padding='VALID')
            self.temp_out = conv
            if self.lay.batch_norm:
                conv = self.batchnorm(self.lay, conv)
            self.out = tf.nn.bias_add(conv, self.lay.w['biases'])
            self.flag = 0
Example #49
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None,
       reuse=None):
  """Adds a fully connected layer followed by an optional batch_norm layer.

  FC creates a variable called 'weights', representing the fully connected
  weight matrix, that is multiplied by the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the initial
  vector-matrix multiplication.

  Args:
    inputs: a [B x N] tensor where B is the batch size and N is the number of
            input units in the layer.
    num_units_out: the number of output units in the layer.
    activation: activation function.
    stddev: the standard deviation for the weights.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If it is None, batch_norm is not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.

  Returns:
     the tensor variable representing the result of the series of operations.
  """
  with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse):
    num_units_in = inputs.get_shape()[1]
    weights_shape = [num_units_in, num_units_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    if batch_norm_params is not None:
      outputs = tf.matmul(inputs, weights)
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(outputs, **batch_norm_params)
    else:
      bias_shape = [num_units_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.xw_plus_b(inputs, weights, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
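A short usage sketch for the fc wrapper above; the [batch, features] placeholder and the layer sizes are assumptions:

features = tf.placeholder(tf.float32, [None, 2048])
hidden = fc(features, 512, weight_decay=0.0004, scope='fc1')   # ReLU applied by default
logits = fc(hidden, 1000, activation=None, scope='logits')     # -> [None, 1000]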
Example #50
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.

  conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the convolution
  operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same.  Note that presently
      both strides must have the same value.
    padding: one of 'VALID' or 'SAME'.
    activation: activation function.
    stddev: standard deviation of the truncated Gaussian weight distribution.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If it is None, batch_norm is not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
  Returns:
    a tensor representing the output of the operation.

  """
  with tf.variable_op_scope([inputs], scope, 'Conv', reuse=reuse):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    num_filters_in = inputs.get_shape()[-1]
    weights_shape = [kernel_h, kernel_w,
                     num_filters_in, num_filters_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
                        padding=padding)
    if batch_norm_params is not None:
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(conv, **batch_norm_params)
    else:
      bias_shape = [num_filters_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.bias_add(conv, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
Example #51
def batch_norm(inputs,
               decay=0.999,
               center=True,
               scale=False,
               epsilon=0.001,
               moving_vars='moving_vars',
               activation=None,
               is_training=True,
               trainable=True,
               restore=True,
               scope=None,
               reuse=None):
  """Adds a Batch Normalization layer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels]
            or [batch_size, channels].
    decay: decay for the moving average.
    center: If True, subtract beta. If False, beta is not created and ignored.
    scale: If True, multiply by gamma. If False, gamma is
      not used. When the next layer is linear (also e.g. ReLU), this can be
      disabled since the scaling can be done by the next layer.
    epsilon: small float added to variance to avoid dividing by zero.
    moving_vars: collection to store the moving_mean and moving_variance.
    activation: activation function.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.

  Returns:
    a tensor representing the output of the operation.

  """
  inputs_shape = inputs.get_shape()
  with tf.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse):
    axis = list(range(len(inputs_shape) - 1))
    params_shape = inputs_shape[-1:]
    # Allocate parameters for the beta and gamma of the normalization.
    beta, gamma = None, None
    if center:
      beta = variables.variable('beta',
                                params_shape,
                                initializer=tf.constant_initializer(0.0),
                                trainable=trainable,
                                restore=restore)
    if scale:
      gamma = variables.variable('gamma',
                                 params_shape,
                                 initializer=tf.ones_initializer(),
                                 trainable=trainable,
                                 restore=restore)
    # Create moving_mean and moving_variance add them to
    # GraphKeys.MOVING_AVERAGE_VARIABLES collections.
    moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
    moving_mean = variables.variable('moving_mean',
                                     params_shape,
                                     initializer=tf.constant_initializer(0.0),
                                     trainable=False,
                                     restore=restore,
                                     collections=moving_collections)
    moving_variance = variables.variable('moving_variance',
                                         params_shape,
                                         initializer=tf.ones_initializer(),
                                         trainable=False,
                                         restore=restore,
                                         collections=moving_collections)
    if is_training:
      # Calculate the moments based on the individual batch.
      mean, variance = tf.nn.moments(inputs, axis)

      update_moving_mean = moving_averages.assign_moving_average(
          moving_mean, mean, decay)
      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
      update_moving_variance = moving_averages.assign_moving_average(
          moving_variance, variance, decay)
      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    else:
      # Just use the moving_mean and moving_variance.
      mean = moving_mean
      variance = moving_variance
    # Normalize the activations.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, beta, gamma, epsilon)
    outputs.set_shape(inputs.get_shape())
    if activation:
      outputs = activation(outputs)
    return outputs
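Since batch_norm above only registers the moving-average updates in UPDATE_OPS_COLLECTION, they still have to be run alongside the training step. A minimal sketch, assuming a previously built `loss` tensor and the same collection name:

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
train_op = tf.group(train_step, *batchnorm_updates)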
Example #52
 def testCreateVariable(self):
     with self.test_session():
         with tf.variable_scope('A'):
             a = variables.variable('a', [5])
             self.assertEquals(a.op.name, 'A/a')
             self.assertListEqual(a.get_shape().as_list(), [5])
Example #53
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None,
       reuse=None):
  """Adds a fully connected layer followed by an optional batch_norm layer.

  FC creates a variable called 'weights', representing the fully connected
  weight matrix, that is multiplied by the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the initial
  vector-matrix multiplication.

  Args:
    inputs: a [B x N] tensor where B is the batch size and N is the number of
            input units in the layer.
    num_units_out: the number of output units in the layer.
    activation: activation function.
    stddev: the standard deviation for the weights.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If it is None, batch_norm is not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.

  Returns:
     the tensor variable representing the result of the series of operations.
  """
  with tf.variable_op_scope([inputs], scope, 'FC', reuse=reuse):
    num_units_in = inputs.get_shape()[1]
    weights_shape = [num_units_in, num_units_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    if batch_norm_params is not None:
      outputs = tf.matmul(inputs, weights)
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(outputs, **batch_norm_params)
    else:
      bias_shape = [num_units_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.xw_plus_b(inputs, weights, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
Example #54
def batch_norm(inputs,
               decay=0.999,
               scale=False,
               epsilon=0.001,
               moving_vars='moving_vars',
               activation=None,
               is_training=True,
               trainable=True,
               restore=True,
               scope=None,
               reuse=None):
  """Adds a Batch Normalization layer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels]
            or [batch_size, channels].
    decay: decay for the moving average.
    scale: If True, multiply by gamma. If False, gamma is
      not used. When the next layer is linear (also e.g. ReLU), this can be
      disabled since the scaling can be done by the next layer.
    epsilon: small float added to variance to avoid dividing by zero.
    moving_vars: collection to store the moving_mean and moving_variance.
    activation: activation function.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.

  Returns:
    a tensor representing the output of the operation.

  """
  inputs_shape = inputs.get_shape()
  with tf.variable_op_scope([inputs], scope, 'BatchNorm', reuse=reuse):
    axis = range(len(inputs_shape) - 1)
    params_shape = inputs_shape[-1:]
    with scopes.arg_scope([variables.variable], restore=restore):
      # Allocate parameters for the beta and gamma of the normalization.
      beta = variables.variable('beta',
                                params_shape,
                                initializer=tf.zeros_initializer,
                                trainable=trainable)
      if scale:
        gamma = variables.variable('gamma',
                                   params_shape,
                                   initializer=tf.ones,
                                   trainable=trainable)
      else:
        gamma = None
      # Create moving_mean and moving_variance add them to moving_vars and
      # GraphKeys.MOVING_AVERAGE_VARIABLES collections.
      with scopes.arg_scope([variables.variable], trainable=False,
                            collections=[
                                moving_vars,
                                tf.GraphKeys.MOVING_AVERAGE_VARIABLES]):
        moving_mean = variables.variable('moving_mean',
                                         params_shape,
                                         initializer=tf.zeros_initializer)
        moving_variance = variables.variable('moving_variance',
                                             params_shape,
                                             initializer=tf.ones)
    if is_training:
      # Calculate the moments based on the individual batch.
      mean, variance = tf.nn.moments(inputs, axis)

      update_moving_mean = moving_averages.assign_moving_average(
          moving_mean, mean, decay)
      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
      update_moving_variance = moving_averages.assign_moving_average(
          moving_variance, variance, decay)
      tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    else:
      # Just use the moving_mean and moving_variance.
      mean = moving_mean
      variance = moving_variance
    # Normalize the activations.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, beta, gamma, epsilon)
    outputs.set_shape(inputs.get_shape())
    if activation:
      outputs = activation(outputs)
    return outputs
Example #55
 def testGetVariableWithSingleVar(self):
   with self.test_session():
     with tf.variable_scope('parent'):
       a = variables.variable('child', [5])
     self.assertEquals(a, variables.get_unique_variable('parent/child'))
Example #56
def deconv2d(inputs,
             num_filters_out,
             kernel_size,
             stride=1,
             padding='SAME',
             activation=tf.nn.relu,
             stddev=0.01,
             bias=0.0,
             weight_decay=0,
             batch_norm_params=None,
             is_training=True,
             trainable=True,
             restore=True,
             scope=None,
             reuse=None):
    """conv2d_transpose"""
    with tf.variable_op_scope([inputs], scope, 'Deconv', reuse=reuse):
        kernel_h, kernel_w = _two_element_tuple(kernel_size)
        stride_h, stride_w = _two_element_tuple(stride)
        num_filters_in = inputs.get_shape().dims[-1].value
        weights_shape = [kernel_h, kernel_w, num_filters_out, num_filters_in]
        weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
        l2_regularizer = None
        if weight_decay and weight_decay > 0:
            l2_regularizer = losses.l2_regularizer(weight_decay)
        weights = variables.variable('weights',
                                     shape=weights_shape,
                                     initializer=weights_initializer,
                                     regularizer=l2_regularizer,
                                     trainable=trainable,
                                     restore=restore)

        inputs_shape = array_ops.shape(inputs)
        batch_size = inputs_shape[0]
        height = inputs_shape[1]
        width = inputs_shape[2]

        def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
            if isinstance(dim_size, ops.Tensor):
                dim_size = math_ops.mul(dim_size, stride_size)
            elif dim_size is not None:
                dim_size *= stride_size
            if padding == 'VALID' and dim_size is not None:
                dim_size += max(kernel_size - stride_size, 0)
            return dim_size

        out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
        out_width = get_deconv_dim(width, stride_w, kernel_w, padding)

        output_shape = array_ops.pack(
            [batch_size, out_height, out_width, num_filters_out])

        outputs = tf.nn.conv2d_transpose(inputs,
                                         weights,
                                         output_shape,
                                         strides=[1, stride_h, stride_w, 1],
                                         padding=padding)

        # Infer the static output shape:
        out_shape = inputs.get_shape().as_list()
        out_shape[-1] = num_filters_out
        out_shape[1] = get_deconv_dim(out_shape[1], stride_h, kernel_h,
                                      padding)
        out_shape[2] = get_deconv_dim(out_shape[2], stride_w, kernel_w,
                                      padding)
        outputs.set_shape(out_shape)

        if batch_norm_params is not None:
            with scopes.arg_scope([batch_norm],
                                  is_training=is_training,
                                  trainable=trainable,
                                  restore=restore):
                outputs = batch_norm(outputs, **batch_norm_params)
        else:
            outputs = outputs

        if activation: outputs = activation(outputs)
        return outputs
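A hypothetical call to the deconv2d wrapper above: with padding='SAME' and stride 2 the spatial dimensions are doubled, so an assumed [None, 16, 16, 64] feature map becomes [None, 32, 32, 32]:

feature_map = tf.placeholder(tf.float32, [None, 16, 16, 64])
upsampled = deconv2d(feature_map, 32, [3, 3], stride=2, scope='up1')  # -> [None, 32, 32, 32]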
Example #57
 def testVariableCollections(self):
     with self.test_session():
         a = variables.variable('a', [], collections=['A', 'C'])
         b = variables.variable('b', [], collections=['B', 'C'])
         self.assertEquals(a, tf.get_collection('A')[0])
         self.assertEquals(b, tf.get_collection('B')[0])
Example #58
def batch_norm(inputs,
               decay=0.999,
               center=True,
               scale=False,
               epsilon=0.001,
               moving_vars='moving_vars',
               activation=None,
               is_training=True,
               trainable=True,
               restore=True,
               scope=None,
               reuse=None):

    inputs_shape = inputs.get_shape()
    with tf.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse):
        axis = list(range(len(inputs_shape) - 1))
        params_shape = inputs_shape[-1:]
        # Allocate parameters for the beta and gamma of the normalization.
        beta, gamma = None, None
        if center:
            beta = variables.variable('beta',
                                      params_shape,
                                      initializer=tf.zeros_initializer(),
                                      trainable=trainable,
                                      restore=restore)
        if scale:
            gamma = variables.variable('gamma',
                                       params_shape,
                                       initializer=tf.ones_initializer(),
                                       trainable=trainable,
                                       restore=restore)
        # Create moving_mean and moving_variance add them to
        # GraphKeys.MOVING_AVERAGE_VARIABLES collections.
        moving_collections = [
            moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES
        ]
        moving_mean = variables.variable('moving_mean',
                                         params_shape,
                                         initializer=tf.zeros_initializer(),
                                         trainable=False,
                                         restore=restore,
                                         collections=moving_collections)
        moving_variance = variables.variable('moving_variance',
                                             params_shape,
                                             initializer=tf.ones_initializer(),
                                             trainable=False,
                                             restore=restore,
                                             collections=moving_collections)
        if is_training:
            # Calculate the moments based on the individual batch.
            mean, variance = tf.nn.moments(inputs, axis)

            update_moving_mean = moving_averages.assign_moving_average(
                moving_mean, mean, decay)
            tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
            update_moving_variance = moving_averages.assign_moving_average(
                moving_variance, variance, decay)
            tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
        else:
            # Just use the moving_mean and moving_variance.
            mean = moving_mean
            variance = moving_variance
        # Normalize the activations.
        outputs = tf.nn.batch_normalization(inputs, mean, variance, beta,
                                            gamma, epsilon)
        outputs.set_shape(inputs.get_shape())
        if activation:
            outputs = activation(outputs)
        return outputs