Example #1
# Builds the convolutional stages of the network (TensorFlow 1.x graph mode).
# This is a class method: self.__conv_num_list holds one entry per stage in the
# form [filter_size, layer_count, out_ch_1, ..., out_ch_layer_count]. It relies
# on `import math`, `import tensorflow as tf`, and an external batch_normalize
# helper defined elsewhere in the project.
def _get_conv_layers(self, in_channel, conv):
    for stage_num, stage_param in enumerate(self.__conv_num_list):
        stage_name = "stage%d_" % stage_num
        filter_size = stage_param[0]
        layer_count = stage_param[1]
        for layer_num in range(layer_count):
            out_channel = stage_param[layer_num + 2]
            # Convolution with He (MSRA) initialization for the weights.
            with tf.variable_scope(stage_name + "conv%d" % layer_num):
                weights = tf.get_variable("weights",
                        initializer=tf.truncated_normal([filter_size, filter_size,
                            in_channel, out_channel],
                            stddev=math.sqrt(2.0 / (filter_size ** 2 * in_channel))))
                biases = tf.get_variable("biases",
                        initializer=tf.zeros([out_channel]))
                conv = tf.nn.conv2d(conv, weights,
                        strides=[1, 1, 1, 1], padding="SAME")
                conv = tf.nn.bias_add(conv, biases)
            # Batch normalization followed by ReLU after every conv layer.
            with tf.variable_scope(stage_name + "bn%d" % layer_num):
                bn = batch_normalize(conv, convnet=True)
            with tf.variable_scope(stage_name + "relu%d" % layer_num):
                relu = tf.nn.relu(bn)
            in_channel = out_channel
            conv = relu
        # 2x2 max pooling halves the spatial resolution at the end of each stage.
        with tf.variable_scope(stage_name + "pool"):
            pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1],
                    strides=[1, 2, 2, 1], padding="SAME")
        conv = pool
    return conv
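
For reference, a minimal sketch of the stage-parameter format the method above appears to expect, inferred from its indexing (stage_param[0] is the filter size, stage_param[1] the layer count, and the remaining entries the per-layer output channels). The concrete values below are illustrative, not taken from the original model.

# Hypothetical stage configuration for _get_conv_layers; the format is inferred
# from the indexing in the method above.
conv_num_list = [
    [3, 2, 64, 64],          # stage 0: two 3x3 conv layers, 64 channels each
    [3, 3, 128, 128, 128],   # stage 1: three 3x3 conv layers, 128 channels each
]

for stage_num, stage_param in enumerate(conv_num_list):
    filter_size, layer_count = stage_param[0], stage_param[1]
    out_channels = stage_param[2:2 + layer_count]
    print("stage%d: %dx%d filters, output channels %s"
          % (stage_num, filter_size, filter_size, out_channels))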
Example #2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs


def conv2d(tensor_in,
           n_filters,
           filter_shape,
           strides=None,
           padding='SAME',
           bias=True,
           activation=None,
           batch_norm=False):
    """Creates a 2D convolutional subgraph with a bank of filters.

    Uses tf.nn.conv2d under the hood.
    Creates a filter bank of shape
      [filter_shape[0], filter_shape[1], tensor_in.get_shape()[3], n_filters]
    and applies it to the input tensor.

    Args:
      tensor_in: input Tensor, 4D shape:
        [batch, in_height, in_width, in_depth].
      n_filters: number of filters in the bank.
      filter_shape: Shape of filters, a list of ints, 1-D of length 2.
      strides: A list of ints, 1-D of length 4. The stride of the sliding
        window for each dimension of input.
      padding: A string: 'SAME' or 'VALID'. The type of padding algorithm to use.
        See the [comment here]
        (https://www.tensorflow.org/api_docs/python/nn.html#convolution).
      bias: Boolean, whether to add a bias term.
      activation: Activation Op, optional. If provided, it is applied to the output.
      batch_norm: Whether to apply batch normalization.

    Returns:
      A Tensor with the resulting convolution.
    """
    with vs.variable_scope('convolution'):
        if strides is None:
            strides = [1, 1, 1, 1]
        input_shape = tensor_in.get_shape()
        filter_shape = list(filter_shape) + [input_shape[3], n_filters]
        filters = vs.get_variable('filters', filter_shape, dtypes.float32)
        output = nn.conv2d(tensor_in, filters, strides, padding)
        if bias:
            bias_var = vs.get_variable('bias', [1, 1, 1, n_filters],
                                       dtypes.float32)
            output += bias_var
        if batch_norm:
            output = batch_normalize(output, convnet=True)
        if activation:
            output = activation(output)
        return output
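
A minimal usage sketch for the conv2d helper above (TensorFlow 1.x graph mode). The placeholder shape and hyperparameters are illustrative assumptions, not taken from the original code.

import tensorflow as tf

# Assumes the conv2d helper defined above is in scope.
images = tf.placeholder(tf.float32, [None, 28, 28, 1])   # hypothetical input batch
with tf.variable_scope('conv_layer1'):
    h = conv2d(images, n_filters=32, filter_shape=[5, 5],
               bias=True, activation=tf.nn.relu)
# With 'SAME' padding and unit strides, h has shape [None, 28, 28, 32].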
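
Both examples call a batch_normalize helper that is not shown here. As a rough stand-in with the same call shape, the sketch below applies training-mode batch normalization over the batch (and, for convnet=True, spatial) axes; it is an assumption, not the original implementation, which would typically also track moving averages for inference.

import tensorflow as tf

def batch_normalize(tensor_in, epsilon=1e-5, convnet=False):
    # Normalize over [batch, height, width] for conv features, else over batch only.
    axes = [0, 1, 2] if convnet else [0]
    depth = tensor_in.get_shape()[-1]
    beta = tf.get_variable('beta', [depth], initializer=tf.zeros_initializer())
    gamma = tf.get_variable('gamma', [depth], initializer=tf.ones_initializer())
    mean, variance = tf.nn.moments(tensor_in, axes)
    return tf.nn.batch_normalization(tensor_in, mean, variance, beta, gamma, epsilon)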