Example #1
 def testVariableCollectionsWithArgScopeNested(self):
     with self.test_session():
         with scopes.arg_scope([variables.variable], collections='A'):
             a = variables.variable('a', [])
             with scopes.arg_scope([variables.variable], collections='B'):
                 b = variables.variable('b', [])
         self.assertEquals(a, tf.get_collection('A')[0])
         self.assertEquals(b, tf.get_collection('B')[0])
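As a mental model for what these tests exercise: arg_scope pushes default keyword arguments for the listed functions onto a stack, and functions opted in via a decorator merge those defaults with their call-site arguments, with inner scopes inheriting from outer ones. A minimal, self-contained sketch of the pattern (an illustration, not the actual TF-Slim implementation):

import contextlib
import functools

_ARG_STACK = [{}]  # stack of {decorated_func: default_kwargs} mappings

@contextlib.contextmanager
def arg_scope(funcs, **kwargs):
    # Copy the enclosing scope so nested scopes inherit its defaults.
    scope = {f: dict(kw) for f, kw in _ARG_STACK[-1].items()}
    for f in funcs:
        scope.setdefault(f, {}).update(kwargs)
    _ARG_STACK.append(scope)
    try:
        yield
    finally:
        _ARG_STACK.pop()

def add_arg_scope(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        merged = dict(_ARG_STACK[-1].get(wrapper, {}))
        merged.update(kwargs)  # explicit call-site kwargs win
        return f(*args, **merged)
    return wrapper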
Example #2
 def testVariableCollectionsWithArgScopeNonNested(self):
     with self.test_session():
         with scopes.arg_scope([variables.variable], collections='A'):
             a = variables.variable('a', [])
         with scopes.arg_scope([variables.variable], collections='B'):
             b = variables.variable('b', [])
         variables.variable('c', [])
         self.assertListEqual([a], tf.get_collection('A'))
         self.assertListEqual([b], tf.get_collection('B'))
Example #3
 def testNestedArgScope(self):
     func1_args = (0, )
     func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
     with scopes.arg_scope([func1], a=1, b=None, c=[1]):
         args, kwargs = func1(0)
         self.assertTupleEqual(args, func1_args)
         self.assertDictEqual(kwargs, func1_kwargs)
         func1_kwargs['b'] = 2
         with scopes.arg_scope([func1], b=2):
             args, kwargs = func1(0)
             self.assertTupleEqual(args, func1_args)
             self.assertDictEqual(kwargs, func1_kwargs)
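The func1 (and later func2) helpers are not shown in these excerpts; the tests assume echo functions registered with the scope machinery, along these lines (a sketch reconstructed from what the assertions require):

@scopes.add_arg_scope
def func1(*args, **kwargs):
    # Returns its inputs unchanged so the test can inspect which
    # kwargs the surrounding arg_scope injected.
    return args, kwargs

@scopes.add_arg_scope
def func2(*args, **kwargs):
    return args, kwargs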
Example #4
 def testVariableRestoreWithArgScopeNested(self):
     with self.test_session():
         with scopes.arg_scope([variables.variable], restore=True):
             a = variables.variable('a', [])
             with scopes.arg_scope([variables.variable],
                                   trainable=False,
                                   collections=['A', 'B']):
                 b = variables.variable('b', [])
             c = variables.variable('c', [])
         self.assertListEqual([a, b, c],
                              variables.get_variables_to_restore())
         self.assertListEqual([a, c], tf.trainable_variables())
         self.assertListEqual([b], tf.get_collection('A'))
         self.assertListEqual([b], tf.get_collection('B'))
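Note the interplay in Example #4: the inner scope makes b non-trainable and routes it into collections 'A' and 'B', while the outer restore=True still applies to it, so all three variables are reported by get_variables_to_restore().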
Example #5
 def testOverwriteArgScope(self):
     func1_args = (0, )
     func1_kwargs = {'a': 1, 'b': 2, 'c': [1]}
     with scopes.arg_scope([func1], a=1, b=None, c=[1]):
         args, kwargs = func1(0, b=2)
         self.assertTupleEqual(args, func1_args)
         self.assertDictEqual(kwargs, func1_kwargs)
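The point of this test is precedence: a keyword passed explicitly at the call site (b=2) overrides the default installed by the enclosing arg_scope (b=None).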
Example #6
    def testVariableWithVariableDeviceChooser(self):

        with tf.Graph().as_default():
            device_fn = variables.VariableDeviceChooser(
                num_parameter_servers=2)
            with scopes.arg_scope([variables.variable], device=device_fn):
                a = variables.variable('a', [])
                b = variables.variable('b', [])
                c = variables.variable('c', [], device='cpu:12')
                d = variables.variable('d', [])
                with tf.device('cpu:99'):
                    e_init = tf.constant(12)
                e = variables.variable('e', initializer=e_init)
            # The values below highlight how the VariableDeviceChooser puts
            # initial values on the same device as the variable.
            self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
            self.assertDeviceEqual(a.initial_value.device, a.device)
            self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
            self.assertDeviceEqual(b.initial_value.device, b.device)
            self.assertDeviceEqual(c.device, '/cpu:12')
            self.assertDeviceEqual(c.initial_value.device, c.device)
            self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
            self.assertDeviceEqual(d.initial_value.device, d.device)
            self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
            self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
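With num_parameter_servers=2, VariableDeviceChooser assigns variables round-robin across '/job:ps/task:0' and '/job:ps/task:1'. An explicit device argument (as for c) bypasses the chooser without advancing it, and an initializer pinned elsewhere (e_init on 'cpu:99') keeps its own placement even though the variable e itself is still placed by the chooser.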
Example #7
    def testVariableWithDeviceFunction(self):
        class DevFn(object):
            def __init__(self):
                self.counter = -1

            def __call__(self, op):
                self.counter += 1
                return 'cpu:%d' % self.counter

        with self.test_session():
            with scopes.arg_scope([variables.variable], device=DevFn()):
                a = variables.variable('a', [])
                b = variables.variable('b', [])
                c = variables.variable('c', [], device='cpu:12')
                d = variables.variable('d', [])
                with tf.device('cpu:99'):
                    e_init = tf.constant(12)
                e = variables.variable('e', initializer=e_init)
            self.assertDeviceEqual(a.device, 'cpu:0')
            self.assertDeviceEqual(a.initial_value.device, 'cpu:0')
            self.assertDeviceEqual(b.device, 'cpu:1')
            self.assertDeviceEqual(b.initial_value.device, 'cpu:1')
            self.assertDeviceEqual(c.device, 'cpu:12')
            self.assertDeviceEqual(c.initial_value.device, 'cpu:12')
            self.assertDeviceEqual(d.device, 'cpu:2')
            self.assertDeviceEqual(d.initial_value.device, 'cpu:2')
            self.assertDeviceEqual(e.device, 'cpu:3')
            self.assertDeviceEqual(e.initial_value.device, 'cpu:99')
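Here the device argument is an arbitrary callable: it is invoked with the op and returns a device string, so DevFn's counter advances once per variable created through the scope. As in the previous example, an explicit device='cpu:12' skips the callable, and the explicitly placed initializer for e stays on 'cpu:99' while e itself gets the next counter value.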
Example #8
 def testSharedArgScopeTuple(self):
     func1_args = (0, )
     func1_kwargs = {'a': 1, 'b': None, 'c': [1]}
     with scopes.arg_scope((func1, func2), a=1, b=None, c=[1]):
         args, kwargs = func1(0)
         self.assertTupleEqual(args, func1_args)
         self.assertDictEqual(kwargs, func1_kwargs)
         args, kwargs = func2(0)
         self.assertTupleEqual(args, func1_args)
         self.assertDictEqual(kwargs, func1_kwargs)
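arg_scope also accepts a tuple (or any sequence) of functions, as here, so func1 and func2 share the same injected defaults.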
Example #9
    def testVariableWithVariableDeviceChooser(self):

        with tf.Graph().as_default():
            device_fn = variables.VariableDeviceChooser()
            with scopes.arg_scope([variables.global_step], device=device_fn):
                gs = variables.global_step()
                gs2 = variables.global_step()
                self.assertEquals(gs, gs2)
                self.assertDeviceEqual(gs.device, 'cpu:0')
                self.assertDeviceEqual(gs.initial_value.device, gs.device)
                self.assertDeviceEqual(gs2.device, 'cpu:0')
                self.assertDeviceEqual(gs2.initial_value.device, gs2.device)
Example #10
 def testReplicaDeviceSetter(self):
     device_fn = tf.train.replica_device_setter(2)
     with tf.Graph().as_default():
         with scopes.arg_scope([variables.global_step], device=device_fn):
             gs = variables.global_step()
             gs2 = variables.global_step()
             self.assertEquals(gs, gs2)
             self.assertDeviceEqual(gs.device, '/job:ps/task:0')
             self.assertDeviceEqual(gs.initial_value.device,
                                    '/job:ps/task:0')
             self.assertDeviceEqual(gs2.device, '/job:ps/task:0')
             self.assertDeviceEqual(gs2.initial_value.device,
                                    '/job:ps/task:0')
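tf.train.replica_device_setter(2) serves the same role here as VariableDeviceChooser: variables are placed on parameter-server tasks. Because global_step is a per-graph singleton, the second call returns the same variable, hence the identical device assertions.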
Example #11
    def testDeviceFn(self):
        class DevFn(object):
            def __init__(self):
                self.counter = -1

            def __call__(self, op):
                self.counter += 1
                return '/cpu:%d' % self.counter

        with tf.Graph().as_default():
            with scopes.arg_scope([variables.global_step], device=DevFn()):
                gs = variables.global_step()
                gs2 = variables.global_step()
            self.assertDeviceEqual(gs.device, '/cpu:0')
            self.assertEquals(gs, gs2)
            self.assertDeviceEqual(gs2.device, '/cpu:0')
Example #12
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None,
       reuse=None):
    """Adds a fully connected layer followed by an optional batch_norm layer.

  FC creates a variable called 'weights', representing the fully connected
  weight matrix, which is multiplied by the input. If `batch_norm_params` is
  None, a second variable called 'biases' is added to the result of the
  initial vector-matrix multiplication.

  Args:
    inputs: a [B x N] tensor where B is the batch size and N is the number of
            input units in the layer.
    num_units_out: the number of output units in the layer.
    activation: activation function.
    stddev: the standard deviation for the weights.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for batch_norm. If None, batch_norm is not
      applied.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse them, `scope` must be given.

  Returns:
     the tensor variable representing the result of the series of operations.
  """
    with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse):
        num_units_in = inputs.get_shape()[1]
        weights_shape = [num_units_in, num_units_out]
        weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
        l2_regularizer = None
        if weight_decay and weight_decay > 0:
            l2_regularizer = losses.l2_regularizer(weight_decay)
        weights = variables.variable('weights',
                                     shape=weights_shape,
                                     initializer=weights_initializer,
                                     regularizer=l2_regularizer,
                                     trainable=trainable,
                                     restore=restore)
        if batch_norm_params is not None:
            outputs = tf.matmul(inputs, weights)
            with scopes.arg_scope([batch_norm],
                                  is_training=is_training,
                                  trainable=trainable,
                                  restore=restore):
                outputs = batch_norm(outputs, **batch_norm_params)
        else:
            bias_shape = [num_units_out]
            bias_initializer = tf.constant_initializer(bias)
            biases = variables.variable('biases',
                                        shape=bias_shape,
                                        initializer=bias_initializer,
                                        trainable=trainable,
                                        restore=restore)
            outputs = tf.nn.xw_plus_b(inputs, weights, biases)
        if activation:
            outputs = activation(outputs)
        return outputs
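A usage sketch for fc (the inputs tensor and hyperparameter values are illustrative, not from the source; fc must be registered with scopes.add_arg_scope for the arg_scope defaults to apply, as the TF-Slim ops are):

# inputs: assumed to be a [batch_size, num_features] float tensor.
with scopes.arg_scope([fc], weight_decay=0.0004, stddev=0.01):
    net = fc(inputs, 256, scope='fc1')
    logits = fc(net, 10, activation=None, scope='logits')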
Example #13
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None):
    """Adds a 2D convolution followed by an optional batch_norm layer.

  conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the convolution
  operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of the
      filters. Can be an int if both values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same.  Note that presently
      both strides must have the same value.
    padding: one of 'VALID' or 'SAME'.
    activation: activation function.
    stddev: standard deviation of the truncated Gaussian weight distribution.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for batch_norm. If None, batch_norm is not
      applied.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse them, `scope` must be given.
  Returns:
    a tensor representing the output of the operation.

  """
    with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
        kernel_h, kernel_w = _two_element_tuple(kernel_size)
        stride_h, stride_w = _two_element_tuple(stride)
        num_filters_in = inputs.get_shape()[-1]
        weights_shape = [kernel_h, kernel_w, num_filters_in, num_filters_out]
        weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
        l2_regularizer = None
        if weight_decay and weight_decay > 0:
            l2_regularizer = losses.l2_regularizer(weight_decay)
        weights = variables.variable('weights',
                                     shape=weights_shape,
                                     initializer=weights_initializer,
                                     regularizer=l2_regularizer,
                                     trainable=trainable,
                                     restore=restore)
        conv = tf.nn.conv2d(inputs,
                            weights, [1, stride_h, stride_w, 1],
                            padding=padding)
        if batch_norm_params is not None:
            with scopes.arg_scope([batch_norm],
                                  is_training=is_training,
                                  trainable=trainable,
                                  restore=restore):
                outputs = batch_norm(conv, **batch_norm_params)
        else:
            bias_shape = [num_filters_out]
            bias_initializer = tf.constant_initializer(bias)
            biases = variables.variable('biases',
                                        shape=bias_shape,
                                        initializer=bias_initializer,
                                        trainable=trainable,
                                        restore=restore)
            outputs = tf.nn.bias_add(conv, biases)
        if activation:
            outputs = activation(outputs)
        return outputs
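And a corresponding sketch for conv2d (images and all hyperparameter values are illustrative; the batch_norm_params keys depend on the batch_norm signature, which is not shown in this excerpt):

# images: assumed to be a [batch_size, height, width, 3] float tensor.
with scopes.arg_scope([conv2d],
                      weight_decay=0.0004,
                      batch_norm_params={'decay': 0.997}):
    net = conv2d(images, 64, [3, 3], scope='conv1')
    net = conv2d(net, 128, [3, 3], stride=2, scope='conv2')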
Example #14
 def testDevice(self):
     with tf.Graph().as_default():
         with scopes.arg_scope([variables.global_step], device='/gpu:0'):
             gs = variables.global_step()
         self.assertDeviceEqual(gs.device, '/gpu:0')