Example #1
def pretrained_vgg16_2(image, num_blocks, tower_setup, freeze_c2=True):
  """VGG16-style convolutional stem followed by three ResNet groups.

  Returns the feature maps [c2, c3, c4]; c2 is detached from the gradient
  flow when freeze_c2 is True.
  """
  assert len(num_blocks) == 3
  # Pad height and width asymmetrically (2 before, 3 after) ahead of the stem.
  l = tf.pad(image, [[0, 0], [2, 3], [2, 3], [0, 0]])

  l = Conv(name='conv0', inputs=[l], n_features=64, tower_setup=tower_setup, filter_size=(7, 7),
           activation='relu', padding='SAME', batch_norm=True, old_order=True).outputs[0]
  l = Conv(name='conv1', inputs=[l], n_features=64, tower_setup=tower_setup, filter_size=(7, 7),
           activation='relu', padding='SAME', batch_norm=True, old_order=True).outputs[0]

  l = tf.pad(l, [[0, 0], [0, 1], [0, 1], [0, 0]])
  l = max_pool(l, shape=[2, 2], strides=[2, 2], padding='VALID')

  l = Conv(name='conv2', inputs=[l], n_features=128, tower_setup=tower_setup, filter_size=(7, 7),
           activation='relu', padding='SAME', batch_norm=True, old_order=True).outputs[0]
  l = Conv(name='conv3', inputs=[l], n_features=128, tower_setup=tower_setup, filter_size=(7, 7),
           activation='relu', padding='SAME', batch_norm=True, old_order=True).outputs[0]

  l = tf.pad(l, [[0, 0], [0, 1], [0, 1], [0, 0]])
  l = max_pool(l, shape=[2, 2], strides=[2, 2], padding='VALID')

  l = Conv(name='conv4', inputs=[l], n_features=256, tower_setup=tower_setup, filter_size=(7, 7),
           activation='relu', padding='SAME', batch_norm=True, old_order=True).outputs[0]
  l = Conv(name='conv5', inputs=[l], n_features=256, tower_setup=tower_setup, filter_size=(7, 7),
           activation='relu', padding='SAME', batch_norm=True, old_order=True).outputs[0]
  l = Conv(name='conv6', inputs=[l], n_features=256, tower_setup=tower_setup, filter_size=(7, 7),
           activation='relu', padding='SAME', batch_norm=True, old_order=True).outputs[0]

  # Two deeper VGG stages are disabled here: conv7-conv9 and conv10-conv12
  # (512 features each, same 7x7/ReLU/batch-norm setup), each preceded by
  # the same pad + 2x2 max-pool as the stages above.

  l = tf.pad(l, [[0, 0], [0, 1], [0, 1], [0, 0]])
  l = max_pool(l, shape=[2, 2], strides=[2, 2], padding='VALID')
  c2 = resnet_group(l, 'group0', 64, num_blocks[0], 1, tower_setup)
  if freeze_c2:
    c2 = tf.stop_gradient(c2)
  c3 = resnet_group(c2, 'group1', 128, num_blocks[1], 2, tower_setup)
  #if tower_setup.is_training:
  #  tf.add_to_collection('checkpoints', c3)
  c4 = resnet_group(c3, 'group2', 256, num_blocks[2], 2, tower_setup)

  return [c2, c3, c4]
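
A minimal usage sketch, assuming TensorFlow 1.x and that tower_setup is the repository's own configuration object constructed elsewhere; the placeholder shape and num_blocks values below are illustrative:

import tensorflow as tf

# Hypothetical input; any NHWC image batch works.
image = tf.placeholder(tf.float32, [None, 224, 224, 3], name='image')
# num_blocks configures the three ResNet groups (values are illustrative).
c2, c3, c4 = pretrained_vgg16_2(image, num_blocks=[3, 4, 6], tower_setup=tower_setup)
# With freeze_c2=True (the default), no gradients flow into c2 or the stem below it.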
Example #2
  def __init__(self, name, inputs, pool_size=(2, 2), pool_strides=None):
    super().__init__()
    assert len(inputs) == 1
    pool_size = list(pool_size)
    inp = inputs[0]
    # `name` is unused in this layer; pool_strides=None is passed through
    # to max_pool unchanged.
    out = max_pool(inp, pool_size, pool_strides)
    self.outputs = [out]
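
This layer simply wraps max_pool and exposes the result through self.outputs, like the other layers in the repository. A hedged instantiation sketch; the class name MaxPool is an assumption, since the snippet shows only the constructor:

# Hypothetical class name and input tensor inp (any NHWC feature map).
pool = MaxPool(name='pool0', inputs=[inp], pool_size=(2, 2))
out = pool.outputs[0]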
Example #3
  def __init__(self, name, inputs, n_features, tower_setup, filter_size=(3, 3), old_order=False,
               strides=(1, 1), dilation=None, pool_size=(1, 1), pool_strides=None, activation="relu", dropout=0.0,
               batch_norm=False, bias=False, batch_norm_decay=Layer.BATCH_NORM_DECAY_DEFAULT, l2=Layer.L2_DEFAULT,
               padding="SAME", W_initializer=None, b_initializer=None, freeze_batchnorm_override=None):
    super(Conv, self).__init__()
    # Mind the order of dropout, conv, activation and batch norm!
    # Default order: batch norm -> activation -> dropout -> conv -> pool.
    # With old_order=True: dropout -> conv -> batch norm -> activation -> pool
    # (used, for example, in tensorpack).
    curr, n_features_inp = prepare_input(inputs)
    filter_size = list(filter_size)
    strides = list(strides)
    pool_size = list(pool_size)
    if pool_strides is None:
      pool_strides = pool_size

    with tf.variable_scope(name):
      if isinstance(W_initializer, list):
        W_initializer = tf.constant_initializer(W_initializer)
      W = self.create_weight_variable("W", filter_size + [n_features_inp, n_features], l2, tower_setup,
                                      initializer=W_initializer)
      b = None
      if bias:
        if b_initializer is not None:
          b_initializer = tf.constant_initializer(b_initializer)
          b = self.create_bias_variable("b", [n_features], tower_setup, initializer=b_initializer)
        else:
          b = self.create_bias_variable("b", [n_features], tower_setup)

      if old_order:
        curr = apply_dropout(curr, dropout)
        if dilation is None:
          curr = conv2d(curr, W, strides, padding=padding)
        else:
          curr = conv2d_dilated(curr, W, dilation, padding=padding)
        if bias:
          curr += b
        if batch_norm:
          curr = self.create_and_apply_batch_norm(curr, n_features, batch_norm_decay, tower_setup,
                                                  freeze_batchnorm_override=freeze_batchnorm_override)
        curr = get_activation(activation)(curr)
      else:
        if batch_norm:
          curr = self.create_and_apply_batch_norm(curr, n_features_inp, batch_norm_decay, tower_setup,
                                                  freeze_batchnorm_override=freeze_batchnorm_override)
        curr = get_activation(activation)(curr)
        curr = apply_dropout(curr, dropout)
        if dilation is None:
          curr = conv2d(curr, W, strides, padding=padding)
        else:
          curr = conv2d_dilated(curr, W, dilation, padding=padding)
        if bias:
          curr += b

      if pool_size != [1, 1]:
        curr = max_pool(curr, pool_size, pool_strides)
    self.outputs = [curr]
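
A short usage sketch for the Conv layer, assuming tower_setup and an NHWC tensor x exist as in the surrounding examples; the layer name and feature count are illustrative:

conv = Conv(name='conv_demo', inputs=[x], n_features=64, tower_setup=tower_setup,
            filter_size=(3, 3), activation='relu', batch_norm=True, old_order=True)
y = conv.outputs[0]
# old_order=True applies dropout -> conv -> batch norm -> activation -> pool,
# matching the ordering used by the pretrained backbones in Examples #1 and #4.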
Example #4
def pretrained_xception_done(image, num_blocks, tower_setup, freeze_c2=True):
    """Xception-style convolutional stem followed by three ResNet groups.

    Returns the feature maps [c2, c3, c4]; c2 is detached from the gradient
    flow when freeze_c2 is True.
    """
    assert len(num_blocks) == 3
    l = tf.pad(image, [[0, 0], [2, 3], [2, 3], [0, 0]])

    l = Conv(name='conv0',
             inputs=[l],
             n_features=32,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]
    l = Conv(name='conv1',
             inputs=[l],
             n_features=64,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]

    l = Conv(name='conv2',
             inputs=[l],
             n_features=128,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]
    l = Conv(name='conv3',
             inputs=[l],
             n_features=128,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]

    l = max_pool(l, shape=[3, 3], strides=[2, 2], padding='SAME')

    l = Conv(name='conv4',
             inputs=[l],
             n_features=256,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]
    l = Conv(name='conv5',
             inputs=[l],
             n_features=256,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]

    l = max_pool(l, shape=[3, 3], strides=[2, 2], padding='SAME')

    l = Conv(name='conv6',
             inputs=[l],
             n_features=728,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]
    l = Conv(name='conv7',
             inputs=[l],
             n_features=728,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]

    l = max_pool(l, shape=[3, 3], strides=[2, 2], padding='SAME')

    l = Conv(name='conv8',
             inputs=[l],
             n_features=728,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]
    l = Conv(name='conv9',
             inputs=[l],
             n_features=728,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]
    l = Conv(name='conv10',
             inputs=[l],
             n_features=728,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]

    l = Conv(name='conv11',
             inputs=[l],
             n_features=728,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]
    l = Conv(name='conv12',
             inputs=[l],
             n_features=1024,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]

    l = max_pool(l, shape=[3, 3], strides=[2, 2], padding='SAME')

    l = Conv(name='conv13',
             inputs=[l],
             n_features=1536,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]
    l = Conv(name='conv14',
             inputs=[l],
             n_features=2048,
             tower_setup=tower_setup,
             filter_size=(3, 3),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]

    l = max_pool(l, shape=[3, 3], strides=[2, 2], padding='SAME')

    l = Conv(name='conv15',
             inputs=[l],
             n_features=2048,
             tower_setup=tower_setup,
             filter_size=(1, 1),
             activation='relu',
             padding='VALID',
             batch_norm=True,
             old_order=True).outputs[0]

    c2 = resnet_group(l, 'group0', 64, num_blocks[0], 1, tower_setup)
    if freeze_c2:
        c2 = tf.stop_gradient(c2)
    c3 = resnet_group(c2, 'group1', 128, num_blocks[1], 2, tower_setup)
    c4 = resnet_group(c3, 'group2', 256, num_blocks[2], 2, tower_setup)

    return [c2, c3, c4]
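
The calling convention matches Example #1; a minimal sketch, assuming TensorFlow 1.x and an existing tower_setup (the shape and num_blocks values are illustrative):

image = tf.placeholder(tf.float32, [None, 299, 299, 3], name='image')
c2, c3, c4 = pretrained_xception_done(image, num_blocks=[3, 4, 6], tower_setup=tower_setup)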