Example #1
def inference(images, num_classes, scope):
    with tf.op_scope([images], scope):
        with scopes.arg_scope([ops.conv2d, ops.fc, ops.dropout],
                              is_training=True):
            with tf.variable_scope('conv1'):
                conv1 = ops.conv2d(images,
                                   num_filters_out=64,
                                   kernel_size=[11, 11],
                                   stride=4,
                                   stddev=1e-1)
            with tf.variable_scope('pool1'):
                pool1 = ops.max_pool(conv1,
                                     kernel_size=3,
                                     stride=2,
                                     padding='VALID')
            with tf.variable_scope('conv2'):
                conv2 = ops.conv2d(pool1,
                                   num_filters_out=192,
                                   kernel_size=1,
                                   stride=1,
                                   stddev=1e-1)
            with tf.variable_scope('pool2'):
                pool2 = ops.max_pool(conv2,
                                     kernel_size=3,
                                     stride=2,
                                     padding='VALID')
            with tf.variable_scope('conv3'):
                conv3 = ops.conv2d(pool2,
                                   num_filters_out=384,
                                   kernel_size=3,
                                   stride=1,
                                   stddev=1e-1)
            with tf.variable_scope('conv4'):
                conv4 = ops.conv2d(conv3,
                                   num_filters_out=256,
                                   kernel_size=3,
                                   stride=1,
                                   stddev=1e-1)
            with tf.variable_scope('conv5'):
                conv5 = ops.conv2d(conv4,
                                   num_filters_out=256,
                                   kernel_size=3,
                                   stride=1,
                                   stddev=1e-1)
            with tf.variable_scope('pool3'):
                pool3 = ops.max_pool(conv5,
                                     kernel_size=3,
                                     stride=2,
                                     padding='VALID')
            flattened = ops.flatten(pool3, scope='flatten')
            with tf.variable_scope('fc1'):
                fc1 = ops.fc(flattened, num_units_out=4096)
            with tf.variable_scope('fc2'):
                fc2 = ops.fc(fc1, num_units_out=4096)
            with tf.variable_scope('fc3'):
                fc3 = ops.fc(fc2, activation=None, num_units_out=num_classes)
    return fc3
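
The `ops` and `scopes` helpers in these examples come from the old inception-slim library in tensorflow/models, where `ops.conv2d` and `ops.fc` wrap the layer together with its optional batch norm and activation. A minimal usage sketch for the AlexNet-style `inference` above is shown below; the import lines, placeholder shape and scope name are assumptions for illustration, not part of the original example.

import tensorflow as tf
from inception.slim import ops, scopes

# Hypothetical usage sketch (TF 0.x/1.x-era API); shapes and names are assumed.
images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
logits = inference(images, num_classes=1000, scope='alexnet')
probabilities = tf.nn.softmax(logits, name='probabilities')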
Example #2
def _res_block(inputs, num_filters_start, half_input):
  print(inputs.get_shape())
  num_filters_in = inputs.get_shape()[-1]
  num_filters_out = num_filters_start * 4
  need_branch1 = (num_filters_in != num_filters_out)
  # TODO tf does not support conv2d with stride > kernel_size
  # Workaround: use a pooling to replace this downsampling
  #stride1 = 2 if half_input else 1
  stride1 = 1
  if half_input:
    with tf.variable_scope('downsample'):
      inputs = ops.max_pool(
          inputs,
          kernel_size=3,
          stride=2,
          padding='SAME')
  if need_branch1:
    with tf.variable_scope('branch1'):
      branch1 = ops.conv2d(
          inputs,
          num_filters_out=num_filters_out,
          kernel_size=1,
          stride=stride1)
  with tf.variable_scope('branch2'):
    with tf.variable_scope('a'):
      branch2a = ops.conv2d(
          inputs,
          num_filters_out=num_filters_start,
          kernel_size=1,
          stride=stride1)
    with tf.variable_scope('b'):
      branch2b = ops.conv2d(
          branch2a,
          num_filters_out=num_filters_start,
          kernel_size=3,
          stride=1)
    with tf.variable_scope('c'):
      branch2c = ops.conv2d(
          branch2b,
          num_filters_out=num_filters_out,
          kernel_size=1,
          stride=1,
          activation=None)
  # Projection shortcut when the channel count changes, identity otherwise.
  branch_sum = branch2c + (branch1 if need_branch1 else inputs)
  ret = tf.nn.relu(branch_sum)
  return ret
Example #3
def _res_block(inputs, num_filters_start, half_input):
    print(inputs.get_shape())
    num_filters_in = inputs.get_shape()[-1]
    num_filters_out = num_filters_start * 4
    need_branch1 = (num_filters_in != num_filters_out)
    # TODO tf does not support conv2d with stride > kernel_size
    # Workaround: use a pooling to replace this downsampling
    #stride1 = 2 if half_input else 1
    stride1 = 1
    if half_input:
        with tf.variable_scope('downsample'):
            inputs = ops.max_pool(inputs,
                                  kernel_size=3,
                                  stride=2,
                                  padding='SAME')
    if need_branch1:
        with tf.variable_scope('branch1'):
            branch1 = ops.conv2d(inputs,
                                 num_filters_out=num_filters_out,
                                 kernel_size=1,
                                 stride=stride1)
    with tf.variable_scope('branch2'):
        with tf.variable_scope('a'):
            branch2a = ops.conv2d(inputs,
                                  num_filters_out=num_filters_start,
                                  kernel_size=1,
                                  stride=stride1)
        with tf.variable_scope('b'):
            branch2b = ops.conv2d(branch2a,
                                  num_filters_out=num_filters_start,
                                  kernel_size=3,
                                  stride=1)
        with tf.variable_scope('c'):
            branch2c = ops.conv2d(branch2b,
                                  num_filters_out=num_filters_out,
                                  kernel_size=1,
                                  stride=1,
                                  activation=None)
    # Projection shortcut when the channel count changes, identity otherwise.
    branch_sum = branch2c + (branch1 if need_branch1 else inputs)
    ret = tf.nn.relu(branch_sum)
    return ret
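
Examples #4 and #5 below build their ResNet stages with a `_res_group` helper that is not part of this listing. A plausible reconstruction, assuming it simply stacks `_res_block` calls and halves the spatial resolution at the first block of every group except the first one, could look like the sketch below; the exact signature and block naming are assumptions.

def _res_group(inputs, num_filters_start, num_blocks, first_group=False):
    # Hypothetical helper: stack `num_blocks` bottleneck blocks, downsampling
    # once at the start of each group except the first group after pool1.
    net = inputs
    for i in range(num_blocks):
        with tf.variable_scope('block%d' % (i + 1)):
            half_input = (i == 0 and not first_group)
            net = _res_block(net, num_filters_start, half_input)
    return net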
Example #4
def inference(inputs, num_classes, scope):
    batch_norm_params = {
        # Decay for the batch_norm moving averages.
        'decay': 0.9997,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
    }
    with tf.op_scope([inputs], scope, 'resnet50'):
        # TODO how to implement scale layer?
        with scopes.arg_scope([ops.conv2d],
                              batch_norm_params=batch_norm_params):
            with tf.variable_scope('conv1'):
                conv1 = ops.conv2d(inputs,
                                   num_filters_out=64,
                                   kernel_size=7,
                                   stride=2)
            with tf.variable_scope('pool1'):
                pool1 = ops.max_pool(conv1,
                                     kernel_size=3,
                                     stride=2,
                                     padding='SAME')
            with tf.variable_scope('res2'):
                res2 = _res_group(pool1,
                                  num_filters_start=64,
                                  num_blocks=3,
                                  first_group=True)
            with tf.variable_scope('res3'):
                res3 = _res_group(res2, num_filters_start=128, num_blocks=8)
            with tf.variable_scope('res4'):
                res4 = _res_group(res3, num_filters_start=256, num_blocks=36)
            with tf.variable_scope('res5'):
                res5 = _res_group(res4, num_filters_start=512, num_blocks=3)
            with tf.variable_scope('pool5'):
                pool5 = ops.avg_pool(res5,
                                     kernel_size=7,
                                     stride=1,
                                     padding='SAME')
            flattened = ops.flatten(pool5, scope='flatten')
            with tf.variable_scope('fc'):
                fc = ops.fc(flattened,
                            activation=None,
                            num_units_out=num_classes)
    return fc
Example #5
def inference(inputs, num_classes, scope):
  batch_norm_params = {
      # Decay for the batch_norm moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
  }
  with tf.op_scope([inputs], scope, 'resnet50'):
    # TODO how to implement scale layer?
    with scopes.arg_scope([ops.conv2d], batch_norm_params=batch_norm_params):
      with tf.variable_scope('conv1'):
        conv1 = ops.conv2d(
            inputs,
            num_filters_out=64,
            kernel_size=7,
            stride=2)
      with tf.variable_scope('pool1'):
        pool1 = ops.max_pool(
            conv1,
            kernel_size=3,
            stride=2,
            padding='SAME')
      with tf.variable_scope('res2'):
        res2 = _res_group(pool1, num_filters_start=64, num_blocks=3, first_group=True)
      with tf.variable_scope('res3'):
        res3 = _res_group(res2, num_filters_start=128, num_blocks=4)
      with tf.variable_scope('res4'):
        res4 = _res_group(res3, num_filters_start=256, num_blocks=23)
      with tf.variable_scope('res5'):
        res5 = _res_group(res4, num_filters_start=512, num_blocks=3)
      with tf.variable_scope('pool5'):
        pool5 = ops.avg_pool(
            res5, 
            kernel_size=7,
            stride=1,
            padding='SAME')
      flattened = ops.flatten(pool5, scope='flatten')
      with tf.variable_scope('fc'):
        fc = ops.fc(
            flattened,
            activation=None,
            num_units_out=num_classes)
  return fc
Example #6
def inference(inputs, num_classes, scope):
  with tf.op_scope([inputs], scope, 'vgg19'):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout], is_training=True):
      # block 1
      with tf.variable_scope('block1'):
        with tf.variable_scope('conv1'):
          conv1 = ops.conv2d(
              inputs,
              num_filters_out=64,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('conv2'):
          conv2 = ops.conv2d(
              conv1,
              num_filters_out=64,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('pool'):
          pool = ops.max_pool(
              conv2,
              kernel_size=2,
              stride=2,
              padding='VALID')
      # block 2
      with tf.variable_scope('block2'):
        with tf.variable_scope('conv1'):
          conv1 = ops.conv2d(
              pool,
              num_filters_out=128,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('conv2'):
          conv2 = ops.conv2d(
              conv1,
              num_filters_out=128,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('pool'):
          pool = ops.max_pool(
              conv2,
              kernel_size=2,
              stride=2,
              padding='VALID')
      # block 3
      with tf.variable_scope('block3'):
        with tf.variable_scope('conv1'):
          conv1 = ops.conv2d(
              pool,
              num_filters_out=256,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('conv2'):
          conv2 = ops.conv2d(
              conv1,
              num_filters_out=256,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('conv3'):
          conv3 = ops.conv2d(
              conv2,
              num_filters_out=256,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('pool'):
          pool = ops.max_pool(
              conv3,
              kernel_size=2,
              stride=2,
              padding='VALID')
      # block 4
      with tf.variable_scope('block4'):
        with tf.variable_scope('conv1'):
          conv1 = ops.conv2d(
              pool,
              num_filters_out=512,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('conv2'):
          conv2 = ops.conv2d(
              conv1,
              num_filters_out=512,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('conv3'):
          conv3 = ops.conv2d(
              conv2,
              num_filters_out=512,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('pool'):
          pool = ops.max_pool(
              conv3,
              kernel_size=2,
              stride=2,
              padding='VALID')
      # block 5
      with tf.variable_scope('block5'):
        with tf.variable_scope('conv1'):
          conv1 = ops.conv2d(
              pool,
              num_filters_out=512,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('conv2'):
          conv2 = ops.conv2d(
              conv1,
              num_filters_out=512,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('conv3'):
          conv3 = ops.conv2d(
              conv2,
              num_filters_out=512,
              kernel_size=[3, 3],
              stride=1,
              stddev=1e-1)
        with tf.variable_scope('pool'):
          pool = ops.max_pool(
              conv3,
              kernel_size=2,
              stride=2,
              padding='VALID')
      flattened = ops.flatten(pool, scope='flatten')
      # fc
      with tf.variable_scope('fc1'):
        fc1 = ops.fc(
            flattened,
            num_units_out=4096)
      with tf.variable_scope('fc2'):
        fc2 = ops.fc(
            fc1,
            num_units_out=4096)
      with tf.variable_scope('fc3'):
        fc3 = ops.fc(
            fc2,
            activation=None,
            num_units_out=num_classes)
  return fc3
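
The VGG-style `inference` above returns raw logits, since the last fully connected layer is built with `activation=None`. A minimal sketch of attaching a softmax cross-entropy loss to it, assuming integer labels and a TF 0.12/1.x-era API (placeholder shapes and names are illustrative assumptions):

# Hedged training-loss sketch; shapes and names are assumptions.
images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
labels = tf.placeholder(tf.int64, [None], name='labels')
logits = inference(images, num_classes=1000, scope='vgg19')
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels)
loss = tf.reduce_mean(cross_entropy, name='loss')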
Example #7
def inference(inputs,
              dropout_keep_prob=0.8,
              num_classes=1000,
              is_training=True,
              restore_logits=True,
              scope=''):
    """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      dropout_keep_prob: dropout keep_prob.
      num_classes: number of predicted classes.
      is_training: whether the network is being trained.
      restore_logits: whether or not the logits layers should be restored.
        Useful for fine-tuning a model with different num_classes.
      scope: Optional scope for op_scope.

    Returns:
      a list containing 'logits', 'aux_logits' Tensors.
    """
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}
    batch_norm_params = {
        # Decay for the batch_norm moving averages.
        'decay': 0.9997,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
    }
    with tf.op_scope([inputs], scope, 'inception_v3'):
        with scopes.arg_scope([ops.conv2d],
                              batch_norm_params=batch_norm_params):
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='VALID'):
                # 299 x 299 x 3
                end_points['conv0'] = ops.conv2d(inputs,
                                                 32, [3, 3],
                                                 stride=2,
                                                 scope='conv0')
                # 149 x 149 x 32
                end_points['conv1'] = ops.conv2d(end_points['conv0'],
                                                 32, [3, 3],
                                                 scope='conv1')
                # 147 x 147 x 32
                end_points['conv2'] = ops.conv2d(end_points['conv1'],
                                                 64, [3, 3],
                                                 padding='SAME',
                                                 scope='conv2')
                # 147 x 147 x 64
                end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                                   stride=2,
                                                   scope='pool1')
                # 73 x 73 x 64
                end_points['conv3'] = ops.conv2d(end_points['pool1'],
                                                 80, [1, 1],
                                                 scope='conv3')
                # 73 x 73 x 80.
                end_points['conv4'] = ops.conv2d(end_points['conv3'],
                                                 192, [3, 3],
                                                 scope='conv4')
                # 71 x 71 x 192.
                end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                                   stride=2,
                                                   scope='pool2')
                # 35 x 35 x 192.
                net = end_points['pool2']
            # Inception blocks
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='SAME'):
                # mixed: 35 x 35 x 256.
                with tf.variable_scope('mixed_35x35x256a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
                    end_points['mixed_35x35x256a'] = net
                # mixed_1: 35 x 35 x 288.
                with tf.variable_scope('mixed_35x35x288a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
                    end_points['mixed_35x35x288a'] = net
                # mixed_2: 35 x 35 x 288.
                with tf.variable_scope('mixed_35x35x288b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
                    end_points['mixed_35x35x288b'] = net
                # mixed_3: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net,
                                               384, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl,
                                                  96, [3, 3],
                                                  stride=2,
                                                  padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat(3, [branch3x3, branch3x3dbl, branch_pool])
                    end_points['mixed_17x17x768a'] = net
                # mixed4: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 128, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 128, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
                    end_points['mixed_17x17x768b'] = net
                # mixed_5: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768c'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
                    end_points['mixed_17x17x768c'] = net
                # mixed_6: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768d'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
                    end_points['mixed_17x17x768d'] = net
                # mixed_7: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768e'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 192, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 192, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
                    end_points['mixed_17x17x768e'] = net
                # Auxiliary Head logits
                aux_logits = tf.identity(end_points['mixed_17x17x768e'])
                with tf.variable_scope('aux_logits'):
                    aux_logits = ops.avg_pool(aux_logits, [5, 5],
                                              stride=3,
                                              padding='VALID')
                    aux_logits = ops.conv2d(aux_logits,
                                            128, [1, 1],
                                            scope='proj')
                    # Shape of feature map before the final layer.
                    shape = aux_logits.get_shape()
                    aux_logits = ops.conv2d(aux_logits,
                                            768,
                                            shape[1:3],
                                            stddev=0.01,
                                            padding='VALID')
                    aux_logits = ops.flatten(aux_logits)
                    aux_logits = ops.fc(aux_logits,
                                        num_classes,
                                        activation=None,
                                        stddev=0.001,
                                        restore=restore_logits)
                    end_points['aux_logits'] = aux_logits
                # mixed_8: 8 x 8 x 1280.
                # Note that the scope below is kept unchanged so as not to
                # invalidate previous checkpoints.
                # (TODO) Fix the scope when appropriate.
                with tf.variable_scope('mixed_17x17x1280a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 192, [1, 1])
                        branch3x3 = ops.conv2d(branch3x3,
                                               320, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch7x7x3'):
                        branch7x7x3 = ops.conv2d(net, 192, [1, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3,
                                                 192, [3, 3],
                                                 stride=2,
                                                 padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat(3, [branch3x3, branch7x7x3, branch_pool])
                    end_points['mixed_17x17x1280a'] = net
                # mixed_9: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat(3, [
                            ops.conv2d(branch3x3, 384, [1, 3]),
                            ops.conv2d(branch3x3, 384, [3, 1])
                        ])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat(3, [
                            ops.conv2d(branch3x3dbl, 384, [1, 3]),
                            ops.conv2d(branch3x3dbl, 384, [3, 1])
                        ])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
                    end_points['mixed_8x8x2048a'] = net
                # mixed_10: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat(3, [
                            ops.conv2d(branch3x3, 384, [1, 3]),
                            ops.conv2d(branch3x3, 384, [3, 1])
                        ])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat(3, [
                            ops.conv2d(branch3x3dbl, 384, [1, 3]),
                            ops.conv2d(branch3x3dbl, 384, [3, 1])
                        ])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
                    end_points['mixed_8x8x2048b'] = net
                # Final pooling and prediction
                with tf.variable_scope('logits'):
                    shape = net.get_shape()
                    net = ops.avg_pool(net,
                                       shape[1:3],
                                       padding='VALID',
                                       scope='pool')
                    # 1 x 1 x 2048
                    net = ops.dropout(net, dropout_keep_prob, scope='dropout')
                    net = ops.flatten(net, scope='flatten')
                    # 2048
                    logits = ops.fc(net,
                                    num_classes,
                                    activation=None,
                                    scope='logits',
                                    restore=restore_logits)
                    # 1000
                    end_points['logits'] = logits
                    end_points['predictions'] = tf.nn.softmax(
                        logits, name='predictions')
            return [logits, aux_logits]
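
As the docstring says, this function returns both the main `logits` and the `aux_logits` of the auxiliary head, and only the former feed the softmax predictions. During training, Inception-style setups weight the auxiliary cross-entropy down (a factor around 0.3 to 0.4 is typical). A hedged sketch of combining the two heads, with the weight, shapes and names being assumptions rather than part of this listing:

# Hypothetical loss combination for the two heads (TF 0.12/1.x-era API).
inputs = tf.placeholder(tf.float32, [None, 299, 299, 3], name='inputs')
labels = tf.placeholder(tf.int64, [None], name='labels')
logits, aux_logits = inference(inputs, num_classes=1000, scope='inception_v3')
main_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
aux_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=aux_logits, labels=labels))
total_loss = main_loss + 0.4 * aux_loss  # the 0.4 auxiliary weight is an assumption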
Example #8
def inference(images, num_classes, scope):
  with tf.op_scope([images], scope):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.dropout], is_training=True):
      with tf.variable_scope('conv1'):
        conv1 = ops.conv2d(
            images,
            num_filters_out=64,
            kernel_size=[11, 11],
            stride=4,
            stddev=1e-1)
      with tf.variable_scope('pool1'):
        pool1 = ops.max_pool(
            conv1,
            kernel_size=3,
            stride=2,
            padding='VALID')
      with tf.variable_scope('conv2'):
        conv2 = ops.conv2d(
            pool1,
            num_filters_out=192,
            kernel_size=1,
            stride=1,
            stddev=1e-1)
      with tf.variable_scope('pool2'):
        pool2 = ops.max_pool(
            conv2,
            kernel_size=3,
            stride=2,
            padding='VALID')
      with tf.variable_scope('conv3'):
        conv3 = ops.conv2d(
            pool2,
            num_filters_out=384,
            kernel_size=3,
            stride=1,
            stddev=1e-1)
      with tf.variable_scope('conv4'):
        conv4 = ops.conv2d(
            conv3,
            num_filters_out=256,
            kernel_size=3,
            stride=1,
            stddev=1e-1)
      with tf.variable_scope('conv5'):
        conv5 = ops.conv2d(
            conv4,
            num_filters_out=256,
            kernel_size=3,
            stride=1,
            stddev=1e-1)
      with tf.variable_scope('pool3'):
        pool3 = ops.max_pool(
            conv5,
            kernel_size=3,
            stride=2,
            padding='VALID')
      flattened = ops.flatten(pool3, scope='flatten')
      with tf.variable_scope('fc1'):
        fc1 = ops.fc(
            flattened,
            num_units_out=4096)
      with tf.variable_scope('fc2'):
        fc2 = ops.fc(
            fc1,
            num_units_out=4096)
      with tf.variable_scope('fc3'):
        fc3 = ops.fc(
            fc2,
            activation=None,
            num_units_out=num_classes)
  return fc3
Example #9
def inference(inputs,
              dropout_keep_prob=0.8,
              num_classes=1000,
              is_training=True,
              restore_logits=True,
              scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether the network is being trained.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for op_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  batch_norm_params = {
      # Decay for the batch_norm moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
  }
  with tf.op_scope([inputs], scope, 'inception_v3'):
    with scopes.arg_scope([ops.conv2d], batch_norm_params=batch_norm_params):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80.
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
      # Inception blocks
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # mixed: 35 x 35 x 256.
        with tf.variable_scope('mixed_35x35x256a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
          net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x256a'] = net
        # mixed_1: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x288a'] = net
        # mixed_2: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x288b'] = net
        # mixed_3: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                      stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat(3, [branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_17x17x768a'] = net
        # mixed4: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 128, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768b'] = net
        # mixed_5: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768c'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768c'] = net
        # mixed_6: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768d'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768d'] = net
        # mixed_7: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768e'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 192, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768e'] = net
        # Auxiliary Head logits
        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
        with tf.variable_scope('aux_logits'):
          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                    padding='VALID')
          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
          # Shape of feature map before the final layer.
          shape = aux_logits.get_shape()
          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                  padding='VALID')
          aux_logits = ops.flatten(aux_logits)
          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                              stddev=0.001, restore=restore_logits)
          end_points['aux_logits'] = aux_logits
        # mixed_8: 8 x 8 x 1280.
        # Note that the scope below is kept unchanged so as not to
        # invalidate previous checkpoints.
        # (TODO) Fix the scope when appropriate.
        with tf.variable_scope('mixed_17x17x1280a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 192, [1, 1])
            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                   padding='VALID')
          with tf.variable_scope('branch7x7x3'):
            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                     stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat(3, [branch3x3, branch7x7x3, branch_pool])
          end_points['mixed_17x17x1280a'] = net
        # mixed_9: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
                                      ops.conv2d(branch3x3, 384, [3, 1])])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                         ops.conv2d(branch3x3dbl, 384, [3, 1])])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_8x8x2048a'] = net
        # mixed_10: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
                                      ops.conv2d(branch3x3, 384, [3, 1])])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                         ops.conv2d(branch3x3dbl, 384, [3, 1])])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_8x8x2048b'] = net
        # Final pooling and prediction
        with tf.variable_scope('logits'):
          shape = net.get_shape()
          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
          # 1 x 1 x 2048
          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
          net = ops.flatten(net, scope='flatten')
          # 2048
          logits = ops.fc(net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          # 1000
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
      return [logits, aux_logits]
Example #10
def inference(inputs, num_classes, scope):
    with tf.op_scope([inputs], scope, 'vgg19'):
        with scopes.arg_scope(
            [ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                is_training=True):
            # block 1
            with tf.variable_scope('block1'):
                with tf.variable_scope('conv1'):
                    conv1 = ops.conv2d(inputs,
                                       num_filters_out=64,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('conv2'):
                    conv2 = ops.conv2d(conv1,
                                       num_filters_out=64,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('pool'):
                    pool = ops.max_pool(conv2,
                                        kernel_size=2,
                                        stride=2,
                                        padding='VALID')
            # block 2
            with tf.variable_scope('block2'):
                with tf.variable_scope('conv1'):
                    conv1 = ops.conv2d(pool,
                                       num_filters_out=128,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('conv2'):
                    conv2 = ops.conv2d(conv1,
                                       num_filters_out=128,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('pool'):
                    pool = ops.max_pool(conv2,
                                        kernel_size=2,
                                        stride=2,
                                        padding='VALID')
            # block 3
            with tf.variable_scope('block3'):
                with tf.variable_scope('conv1'):
                    conv1 = ops.conv2d(pool,
                                       num_filters_out=256,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('conv2'):
                    conv2 = ops.conv2d(conv1,
                                       num_filters_out=256,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('conv3'):
                    conv3 = ops.conv2d(conv2,
                                       num_filters_out=256,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('pool'):
                    pool = ops.max_pool(conv3,
                                        kernel_size=2,
                                        stride=2,
                                        padding='VALID')
            # block 4
            with tf.variable_scope('block4'):
                with tf.variable_scope('conv1'):
                    conv1 = ops.conv2d(pool,
                                       num_filters_out=512,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('conv2'):
                    conv2 = ops.conv2d(conv1,
                                       num_filters_out=512,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('conv3'):
                    conv3 = ops.conv2d(conv2,
                                       num_filters_out=512,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('pool'):
                    pool = ops.max_pool(conv3,
                                        kernel_size=2,
                                        stride=2,
                                        padding='VALID')
            # block 5
            with tf.variable_scope('block5'):
                with tf.variable_scope('conv1'):
                    conv1 = ops.conv2d(pool,
                                       num_filters_out=512,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('conv2'):
                    conv2 = ops.conv2d(conv1,
                                       num_filters_out=512,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('conv3'):
                    conv3 = ops.conv2d(conv2,
                                       num_filters_out=512,
                                       kernel_size=[3, 3],
                                       stride=1,
                                       stddev=1e-1)
                with tf.variable_scope('pool'):
                    pool = ops.max_pool(conv3,
                                        kernel_size=2,
                                        stride=2,
                                        padding='VALID')
            flattened = ops.flatten(pool, scope='flatten')
            # fc
            with tf.variable_scope('fc1'):
                fc1 = ops.fc(flattened, num_units_out=4096)
            with tf.variable_scope('fc2'):
                fc2 = ops.fc(fc1, num_units_out=4096)
            with tf.variable_scope('fc3'):
                fc3 = ops.fc(fc2, activation=None, num_units_out=num_classes)
    return fc3