Example #1
def testL2RegularizerWithScope(self):
  with self.test_session():
    shape = [5, 5, 5]
    num_elem = 5 * 5 * 5
    tensor = tf.constant(1.0, shape=shape)
    loss = losses.l2_regularizer(scope='L2')(tensor)
    self.assertEquals(loss.op.name, 'L2/value')
    self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
Example #2
def testL2RegularizerWithScope(self):
    with self.test_session():
        shape = [5, 5, 5]
        num_elem = 5 * 5 * 5
        tensor = tf.constant(1.0, shape=shape)
        loss = losses.l2_regularizer(scope='L2')(tensor)
        self.assertEquals(loss.op.name, 'L2/value')
        self.assertAlmostEqual(loss.eval(), num_elem / 2, 5)
Example #3
def testL2RegularizerWithWeight(self):
    with self.test_session():
        shape = [5, 5, 5]
        num_elem = 5 * 5 * 5
        tensor = tf.constant(1.0, shape=shape)
        weight = 0.01
        loss = losses.l2_regularizer(weight)(tensor)
        self.assertEquals(loss.op.name, 'L2Regularizer/value')
        self.assertAlmostEqual(loss.eval(), num_elem * weight / 2, 5)
Example #4
def testL2RegularizerWithWeight(self):
  with self.test_session():
    shape = [5, 5, 5]
    num_elem = 5 * 5 * 5
    tensor = tf.constant(1.0, shape=shape)
    weight = 0.01
    loss = losses.l2_regularizer(weight)(tensor)
    self.assertEquals(loss.op.name, 'L2Regularizer/value')
    self.assertAlmostEqual(loss.eval(), num_elem * weight / 2, 5)
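All four tests assert the same arithmetic: for a tensor of ones with 5 * 5 * 5 = 125 elements, the slim-style l2_regularizer evaluates to weight * sum(x ** 2) / 2, with a default weight of 1.0. A minimal NumPy sketch of the expected values (purely illustrative, using only the constants from the tests):

import numpy as np

def expected_l2(x, weight=1.0):
    # Mirrors weight * tf.nn.l2_loss(x): half the sum of squared entries, scaled.
    return weight * np.sum(np.square(x)) / 2.0

ones = np.ones([5, 5, 5])          # 125 elements, all 1.0
print(expected_l2(ones))           # 62.5  -> num_elem / 2
print(expected_l2(ones, 0.01))     # 0.625 -> num_elem * weight / 2
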
Example #5
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None,
       reuse=None):
    """Adds a fully connected layer followed by an optional batch_norm layer.

  FC creates a variable called 'weights', representing the fully connected
  weight matrix, that is multiplied by the input. If `batch_norm_params` is
  None, a second variable called 'biases' is added to the result of the initial
  vector-matrix multiplication.

  Args:
    inputs: a [B x N] tensor where B is the batch size and N is the number of
            input units in the layer.
    num_units_out: the number of output units in the layer.
    activation: activation function.
    stddev: the standard deviation for the weights.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If None, it is not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer, scope must be given.

  Returns:
     the tensor variable representing the result of the series of operations.
  """
    with tf.variable_op_scope([inputs], scope, 'FC', reuse=reuse):
        num_units_in = inputs.get_shape()[1]
        weights_shape = [num_units_in, num_units_out]
        weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
        l2_regularizer = None
        if weight_decay and weight_decay > 0:
            l2_regularizer = losses.l2_regularizer(weight_decay)
        weights = variables.variable('weights',
                                     shape=weights_shape,
                                     initializer=weights_initializer,
                                     regularizer=l2_regularizer,
                                     trainable=trainable,
                                     restore=restore)
        if batch_norm_params is not None:
            outputs = tf.matmul(inputs, weights)
            with scopes.arg_scope([batch_norm],
                                  is_training=is_training,
                                  trainable=trainable,
                                  restore=restore):
                outputs = batch_norm(outputs, **batch_norm_params)
        else:
            bias_shape = [
                num_units_out,
            ]
            bias_initializer = tf.constant_initializer(bias)
            biases = variables.variable('biases',
                                        shape=bias_shape,
                                        initializer=bias_initializer,
                                        trainable=trainable,
                                        restore=restore)
            outputs = tf.nn.xw_plus_b(inputs, weights, biases)
        if activation:
            outputs = activation(outputs)
        return outputs
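The docstring above describes which variables fc creates and when batch_norm replaces the bias term. A hypothetical usage sketch follows; the input shape, weight decay value, and batch_norm_params dict are illustrative, not taken from the source:

# Hypothetical usage of the fc() layer above; names and values are illustrative.
features = tf.placeholder(tf.float32, shape=[None, 2048])

# Plain FC layer: creates 'FC/weights' and 'FC/biases' and applies ReLU.
logits = fc(features, num_units_out=1000, weight_decay=0.0005)

# FC layer followed by batch_norm instead of a bias term, with no activation.
embedding = fc(features, num_units_out=256,
               batch_norm_params={'decay': 0.997},
               activation=None,
               scope='embedding')
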
Example #6
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None,
           init=None):
    """Adds a 2D convolution followed by an optional batch_norm layer.

  conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the convolution
  operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same.  Note that presently
      both strides must have the same value.
    padding: one of 'VALID' or 'SAME'.
    activation: activation function.
    stddev: standard deviation of the truncated Gaussian weight distribution.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If None, it is not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer, scope must be given.
  Returns:
    a tensor representing the output of the operation.

  """
    with tf.variable_op_scope([inputs], scope, 'Conv', reuse=reuse):
        kernel_h, kernel_w = _two_element_tuple(kernel_size)
        stride_h, stride_w = _two_element_tuple(stride)
        num_filters_in = inputs.get_shape()[-1]
        weights_shape = [kernel_h, kernel_w, num_filters_in, num_filters_out]
        weights_initializer = None
        if init is None:
            weights_initializer = tf.truncated_normal_initializer(
                stddev=stddev)
        else:
            weights_initializer = tf.constant(init['weights'])
            weights_shape = None
        l2_regularizer = None
        if weight_decay and weight_decay > 0:
            l2_regularizer = losses.l2_regularizer(weight_decay)
        weights = variables.variable('weights',
                                     shape=weights_shape,
                                     initializer=weights_initializer,
                                     regularizer=l2_regularizer,
                                     trainable=trainable,
                                     restore=restore)

        conv = tf.nn.conv2d(inputs,
                            weights, [1, stride_h, stride_w, 1],
                            padding=padding)
        if batch_norm_params is not None:
            with scopes.arg_scope([batch_norm],
                                  is_training=is_training,
                                  trainable=trainable,
                                  restore=restore):
                outputs = batch_norm(conv, **batch_norm_params)
        else:
            bias_shape = [
                num_filters_out,
            ]
            bias_initializer = None
            if init is None:
                bias_initializer = tf.constant_initializer(bias)
            else:
                bias_initializer = tf.constant(init['biases'])
                bias_shape = None
            biases = variables.variable('biases',
                                        shape=bias_shape,
                                        initializer=bias_initializer,
                                        trainable=trainable,
                                        restore=restore)
            outputs = tf.nn.bias_add(conv, biases)
        if activation:
            outputs = activation(outputs)
        return outputs
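A matching hypothetical call for conv2d; the image shape, kernel/stride values, and batch_norm_params below are again illustrative only:

# Hypothetical usage of the conv2d() layer above; shapes and values are illustrative.
images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])

# 3x3 convolution, stride 2, with L2 weight decay on the kernel;
# creates 'Conv/weights' and 'Conv/biases'.
net = conv2d(images, num_filters_out=64, kernel_size=3, stride=2,
             weight_decay=0.0001)

# Same convolution followed by batch_norm; no 'biases' variable is created.
net_bn = conv2d(images, 64, [3, 3],
                batch_norm_params={'decay': 0.997},
                scope='conv_bn')
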
Example #7
File: ops.py, Project: Vaan5/piecewisecrf
def fc(inputs,
       num_units_out,
       activation=tf.nn.relu,
       stddev=0.01,
       bias=0.0,
       weight_decay=0,
       batch_norm_params=None,
       is_training=True,
       trainable=True,
       restore=True,
       scope=None,
       reuse=None):
  """Adds a fully connected layer followed by an optional batch_norm layer.

  FC creates a variable called 'weights', representing the fully connected
  weight matrix, that is multiplied by the input. If `batch_norm_params` is
  None, a second variable called 'biases' is added to the result of the initial
  vector-matrix multiplication.

  Args:
    inputs: a [B x N] tensor where B is the batch size and N is the number of
            input units in the layer.
    num_units_out: the number of output units in the layer.
    activation: activation function.
    stddev: the standard deviation for the weights.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If None, it is not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer, scope must be given.

  Returns:
     the tensor variable representing the result of the series of operations.
  """
  with tf.variable_op_scope([inputs], scope, 'FC', reuse=reuse):
    num_units_in = inputs.get_shape()[1]
    weights_shape = [num_units_in, num_units_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    if batch_norm_params is not None:
      outputs = tf.matmul(inputs, weights)
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(outputs, **batch_norm_params)
    else:
      bias_shape = [num_units_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.xw_plus_b(inputs, weights, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
Example #8
File: ops.py, Project: Vaan5/piecewisecrf
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None,
           init=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.

  conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the convolution
  operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same.  Note that presently
      both strides must have the same value.
    padding: one of 'VALID' or 'SAME'.
    activation: activation function.
    stddev: standard deviation of the truncated Gaussian weight distribution.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If None, it is not used.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_op_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer, scope must be given.
  Returns:
    a tensor representing the output of the operation.

  """
  with tf.variable_op_scope([inputs], scope, 'Conv', reuse=reuse):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    num_filters_in = inputs.get_shape()[-1]
    weights_shape = [kernel_h, kernel_w,
                     num_filters_in, num_filters_out]
    weights_initializer = None
    if init is None:
      weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    else:
      weights_initializer = tf.constant(init['weights'])
      weights_shape = None
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)

    conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
                        padding=padding)
    if batch_norm_params is not None:
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(conv, **batch_norm_params)
    else:
      bias_shape = [num_filters_out,]
      bias_initializer = None
      if init is None:
        bias_initializer = tf.constant_initializer(bias)
      else:
        bias_initializer = tf.constant(init['biases'])
        bias_shape = None
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.bias_add(conv, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
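Examples #6 and #8 extend the stock slim op with an init argument: when a dict holding 'weights' and 'biases' arrays is supplied, the variables are initialized from those constants (with shape=None) instead of a truncated normal. A hedged sketch of passing pre-trained parameters, assuming an images tensor as in the earlier sketch and a purely illustrative .npy file name:

# Hypothetical use of the extra `init` argument; the file name is illustrative.
import numpy as np

pretrained = np.load('conv1_params.npy', allow_pickle=True).item()
# Expected keys, per the code above:
#   'weights' with shape [kernel_h, kernel_w, filters_in, filters_out]
#   'biases'  with shape [filters_out]
net = conv2d(images, num_filters_out=64, kernel_size=3,
             init={'weights': pretrained['weights'],
                   'biases': pretrained['biases']})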