Example #1
def build_model(self, images, is_training=True):
  """Build model with input images."""
  with tf.variable_scope("V1NetCNN", reuse=tf.AUTO_REUSE):
    model_config = self.model_config
    num_classes = model_config.num_classes
    n, _, _, _ = images.shape.as_list()
    net = tf.identity(images)
    net = self.preprocess(net)
    net = self.convolution_stem(net)
    net = build_v1net(inputs=net,
                      timesteps=model_config.timesteps,
                      filters=model_config.v1net_filters,
                      kernel_size=model_config.v1net_kernel_size,
                      is_training=is_training)
    net = build_avgpool(net)
    # The dense head must emit num_classes units for the reshape below.
    net = build_dense(net, units=num_classes)
    net = tf.reshape(net, [n, num_classes])
  return net
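For orientation, here is a minimal usage sketch. The enclosing class (`V1NetModel` below) and the `ModelConfig` fields are assumptions inferred from the method above, not part of the source.

# Hypothetical usage sketch; V1NetModel and ModelConfig are assumed names.
import tensorflow.compat.v1 as tf

config = ModelConfig(num_classes=2, timesteps=4,
                     v1net_filters=64, v1net_kernel_size=5)
model = V1NetModel(model_config=config)
images = tf.placeholder(tf.float32, [8, 224, 224, 3])
logits = model.build_model(images, is_training=True)  # shape [8, 2]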
Example #2
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          add_v1net_early=False,
          add_v1net=False,
          reuse=None,
          scope='vgg_a',
          fc_conv_padding='VALID',
          global_pool=False):
    """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer is
      omitted and the input features to the logits layer are returned instead.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    add_v1net_early: whether to add a V1Net block after the first convolution
      (accepted for API compatibility; unused in this variant).
    add_v1net: whether to add a V1Net block after the first pooling layer;
      only takes effect when FLAGS.v1_timesteps is set.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional scope for the variables.
    fc_conv_padding: the type of padding to use for the fully connected layer
      that is implemented as a convolutional layer. Use 'SAME' padding if you
      are applying the network in a fully convolutional manner and want to
      get a prediction map downsampled by a factor of 32 as an output.
      Otherwise, the output prediction map will be (input / 32) - 6 in case of
      'VALID' padding.
    global_pool: Optional boolean flag. If True, the input to the classification
      layer is avgpooled to size 1x1, for any input size. (This is not part
      of the original VGG architecture.)

  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
      or the input to the logits layer (if num_classes is 0 or None).
    end_points: a dict of tensors with intermediate activations.
  """
    with tf.variable_scope(scope, 'vgg_a', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                            outputs_collections=end_points_collection):
            net = slim.repeat(inputs,
                              1,
                              slim.conv2d,
                              64, [3, 3],
                              scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            if add_v1net and FLAGS.v1_timesteps:
                v1_timesteps, v1_kernel_size = 6, 5
                tf.logging.info(
                    "Adding V1Net with %s timesteps, %s kernel_size" %
                    (v1_timesteps, v1_kernel_size))
                net = build_v1net(inputs=net,
                                  filters=64,
                                  timesteps=v1_timesteps,
                                  kernel_size=v1_kernel_size)
            net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')

            # Use conv2d instead of fully_connected layers.
            net = slim.conv2d(net,
                              4096, [7, 7],
                              padding=fc_conv_padding,
                              scope='fc6')
            net = slim.dropout(net,
                               dropout_keep_prob,
                               is_training=is_training,
                               scope='dropout6')
            net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
            # Convert end_points_collection into a end_point dict.
            end_points = slim.utils.convert_collection_to_dict(
                end_points_collection)
            if global_pool:
                net = tf.reduce_mean(input_tensor=net,
                                     axis=[1, 2],
                                     keepdims=True,
                                     name='global_pool')
                end_points['global_pool'] = net
            if num_classes:
                net = slim.dropout(net,
                                   dropout_keep_prob,
                                   is_training=is_training,
                                   scope='dropout7')
                net = slim.conv2d(net,
                                  num_classes, [1, 1],
                                  activation_fn=None,
                                  normalizer_fn=None,
                                  scope='fc8')
                if spatial_squeeze:
                    net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[sc.name + '/fc8'] = net
            return net, end_points
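A hedged usage sketch for vgg_a follows, assuming the TF1 graph mode and tf.slim setup used above; the tf_slim import name is an assumption (older code imports slim from tf.contrib).

# Usage sketch, assuming TF1 graph mode and tf.slim as in the code above.
import tensorflow.compat.v1 as tf
import tf_slim as slim  # older code: from tensorflow.contrib import slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = vgg_a(inputs, num_classes=1000,
                           is_training=False, add_v1net=False)
# logits: [batch_size, 1000] after spatial squeeze; end_points maps
# scope names (e.g. 'vgg_a/conv1') to intermediate activations.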
Example #3
def vgg_16_hed_cam(
    inputs,
    cams,
    num_classes=1,
    is_training=True,
    add_v1net_early=False,
    add_v1net=False,
    reuse=None,
    reduce_conv=True,
    scope='vgg_16',
):
    """VGG-16 implementation of HED.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    cams: a tensor of size [batch_size, height, width, channels] holding
      auxiliary maps (e.g., class activation maps) that are added to the
      backbone features at every stage.
    num_classes: number of predicted classes (accepted for API compatibility;
      the side-output classifiers are single-channel).
    is_training: whether or not the model is being trained.
    add_v1net_early: whether to add a v1net block after the first convolution.
    add_v1net: whether to add v1net blocks after later convolutions.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    reduce_conv: if True, fuse the side outputs with the learned
      fuse_predictions op; otherwise average them.
    scope: Optional scope for the variables.
  Returns:
    fused_predictions: fused edge logits at input resolution.
    end_points: a dict of tensors with intermediate activations, including
      'fused_predictions' and 'side_outputs_fullres'.
  """
    side_outputs = []
    _, h, w, _ = inputs.shape.as_list()
    with tf.variable_scope(scope, 'vgg_16', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope(
            [slim.conv2d, slim.fully_connected, slim.max_pool2d],
                outputs_collections=end_points_collection):
            net = slim.repeat(inputs,
                              2,
                              slim.conv2d,
                              64, [3, 3],
                              scope='conv1')
            with tf.variable_scope("cam-conv1"):
                cam_net = slim.repeat(cams,
                                      1,
                                      slim.conv2d,
                                      64, [3, 3],
                                      scope="cam-conv1")
                net = net + cam_net

            if add_v1net_early and FLAGS.v1_timesteps:
                with tf.variable_scope("v1net-conv1"):
                    v1_timesteps, v1_kernel_size, n_filters = FLAGS.v1_timesteps, 3, 64
                    net = build_v1net(inputs=net,
                                      filters=n_filters,
                                      timesteps=v1_timesteps,
                                      kernel_size=v1_kernel_size,
                                      is_training=is_training)
            side_outputs.append(net)
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            cam_net = slim.max_pool2d(cam_net, [2, 2], scope='cam_pool1')

            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            with tf.variable_scope("cam-conv2"):
                cam_net = slim.repeat(cam_net,
                                      1,
                                      slim.conv2d,
                                      128, [3, 3],
                                      scope="cam-conv2")
                net = net + cam_net

            if add_v1net and FLAGS.v1_timesteps:
                with tf.variable_scope("v1net-conv2"):
                    v1_timesteps, v1_kernel_size, n_filters = FLAGS.v1_timesteps, 3, 128
                    net = build_v1net(inputs=net,
                                      filters=n_filters,
                                      timesteps=v1_timesteps,
                                      kernel_size=v1_kernel_size,
                                      is_training=is_training)
            side_outputs.append(net)
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            cam_net = slim.max_pool2d(cam_net, [2, 2], scope='cam_pool2')

            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
            with tf.variable_scope("cam-conv3"):
                cam_net = slim.repeat(cam_net,
                                      1,
                                      slim.conv2d,
                                      256, [3, 3],
                                      scope="cam-conv3")
                net = net + cam_net

            if add_v1net and FLAGS.v1_timesteps:
                with tf.variable_scope("v1net-conv3"):
                    v1_timesteps, v1_kernel_size, n_filters = FLAGS.v1_timesteps, 3, 256
                    net = build_v1net(inputs=net,
                                      filters=n_filters,
                                      timesteps=v1_timesteps,
                                      kernel_size=v1_kernel_size,
                                      is_training=is_training)
            side_outputs.append(net)
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            cam_net = slim.max_pool2d(cam_net, [2, 2], scope='cam_pool3')

            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
            with tf.variable_scope("cam-conv4"):
                cam_net = slim.repeat(cam_net,
                                      1,
                                      slim.conv2d,
                                      512, [3, 3],
                                      scope="cam-conv4")
                net = net + cam_net

            if add_v1net and FLAGS.v1_timesteps:
                with tf.variable_scope("v1net-conv4"):
                    v1_timesteps, v1_kernel_size, n_filters = FLAGS.v1_timesteps, 3, 512
                    net = build_v1net(inputs=net,
                                      filters=n_filters,
                                      timesteps=v1_timesteps,
                                      kernel_size=v1_kernel_size,
                                      is_training=is_training)
            side_outputs.append(net)
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            cam_net = slim.max_pool2d(cam_net, [2, 2], scope='cam_pool4')

            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
            with tf.variable_scope("cam-conv5"):
                cam_net = slim.repeat(cam_net,
                                      1,
                                      slim.conv2d,
                                      512, [3, 3],
                                      scope="cam-conv5")
                net = net + cam_net

            if add_v1net and FLAGS.v1_timesteps:
                with tf.variable_scope("v1net-conv5"):
                    v1_timesteps, v1_kernel_size, n_filters = FLAGS.v1_timesteps, 3, 512
                    net = build_v1net(inputs=net,
                                      filters=n_filters,
                                      timesteps=v1_timesteps,
                                      kernel_size=v1_kernel_size,
                                      is_training=is_training)
            side_outputs.append(net)
            end_points = slim.utils.convert_collection_to_dict(
                end_points_collection)
            # Keep the full-resolution conv1 side output and upsample the
            # remaining side outputs to input resolution.
            side_outputs_fullres = [side_outputs[0]] + [
                tf.image.resize_bilinear(side_output, [h, w])
                for side_output in side_outputs[1:]
            ]
            with tf.variable_scope("side_output_classifiers", reuse=reuse):
                side_outputs_fullres = [
                    slim.conv2d(
                        side_output,
                        1,
                        [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                    ) for side_output in side_outputs_fullres
                ]
            # Stack side outputs to [num_sides, batch, h, w, 1].
            side_outputs_fullres = tf.stack(side_outputs_fullres, axis=0)
            if reduce_conv:
                with tf.variable_scope("side_output_fusion"):
                    # [batch, h, w, 1, num_sides] -> [batch, h, w, num_sides].
                    side_outputs_ = tf.transpose(side_outputs_fullres,
                                                 (1, 2, 3, 4, 0))
                    side_outputs_ = tf.squeeze(side_outputs_, axis=3)
                    fused_predictions = fuse_predictions(side_outputs_)
            else:
                # Simple unweighted fusion across side outputs.
                fused_predictions = tf.reduce_mean(side_outputs_fullres,
                                                   axis=0)
            end_points['fused_predictions'] = fused_predictions
            side_outputs_fullres = tf.reshape(side_outputs_fullres,
                                              (-1, h, w, 1))
            end_points['side_outputs_fullres'] = side_outputs_fullres
            return fused_predictions, end_points
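A hedged usage sketch; the single-channel shape of the cams placeholder is an assumption (any channel count accepted by the cam convolutions would do).

# Usage sketch; the single-channel cams placeholder is an assumption.
import tensorflow.compat.v1 as tf

images = tf.placeholder(tf.float32, [4, 320, 320, 3])
cams = tf.placeholder(tf.float32, [4, 320, 320, 1])
fused_logits, end_points = vgg_16_hed_cam(images, cams, is_training=True)
edge_probs = tf.nn.sigmoid(fused_logits)  # per-pixel edge probabilities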
Example #4
def resnet_v2_hed(inputs,
                  blocks,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  include_root_block=True,
                  spatial_squeeze=True,
                  add_v1net=False,
                  add_v1net_early=False,
                  compact=False,
                  reuse=None,
                  scope=None):
    """Generator for v2 (preactivation) ResNet models.
  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.
  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.
  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks.
      If 0 or None, we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
        To use this parameter, the input images must be smaller than 300x300
        pixels, in which case the output logit layer does not contain spatial
        information and can be removed.
    add_v1net: whether to add V1Net blocks inside later stages (accepted for
      API compatibility; unused in this variant).
    add_v1net_early: whether to add a V1Net block after the root convolution.
    compact: whether to build the compact V1Net variant (forwarded to
      build_v1net).
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
  Returns:
    predictions: fused HED edge predictions at input resolution.
    end_points: A dictionary from components of the network to the corresponding
      activation, including 'fused_predictions' and 'side_outputs_fullres'.
  Raises:
    ValueError: If the target output_stride is not valid.
  """
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope(
            [slim.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
                outputs_collections=end_points_collection):
            with slim.arg_scope([slim.batch_norm], is_training=is_training):
                net = inputs
                _, h, w, _ = inputs.shape.as_list()
                if include_root_block:
                    if output_stride is not None:
                        if output_stride % 4 != 0:
                            raise ValueError(
                                'The output_stride needs to be a multiple of 4.'
                            )
                        output_stride /= 4
                    # We do not include batch normalization or activation functions in
                    # conv1 because the first ResNet unit will perform these. Cf.
                    # Appendix of [2].
                    with slim.arg_scope([slim.conv2d],
                                        activation_fn=None,
                                        normalizer_fn=None):
                        net = resnet_utils.conv2d_same(net,
                                                       64,
                                                       7,
                                                       stride=2,
                                                       scope='conv1')
                        net = slim.max_pool2d(net, [3, 3],
                                              stride=2,
                                              scope='pool1')
                    if add_v1net_early:
                        with tf.variable_scope("v1net-conv1"):
                            v1_timesteps, v1_kernel_size, n_filters = 4, 3, 64
                            net = build_v1net(inputs=net,
                                              filters=n_filters,
                                              timesteps=v1_timesteps,
                                              kernel_size=v1_kernel_size,
                                              compact=compact)
                net, side_outputs = resnet_utils.stack_blocks_dense_hed(
                    net, blocks, output_stride)
                # This is needed because the pre-activation variant does not have batch
                # normalization or activation functions in the residual unit output. See
                # Appendix of [2].
                net = slim.batch_norm(net,
                                      activation_fn=tf.nn.relu,
                                      scope='postnorm')
                # Convert end_points_collection into a dictionary of end_points.
                end_points = slim.utils.convert_collection_to_dict(
                    end_points_collection)
                side_outputs_fullres, predictions = build_hed_output(
                    side_outputs,
                    image_size=[h, w],
                    reuse=reuse,
                )
                end_points['fused_predictions'] = predictions
                end_points['side_outputs_fullres'] = side_outputs_fullres
                return predictions, end_points
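Calling resnet_v2_hed requires a list of resnet_utils.Block objects. Below is a hedged sketch using the slim-style resnet_v2_block helper; the exact block constructor in this codebase may differ.

# Usage sketch; resnet_v2_block is the slim helper and may differ from the
# block constructor used in this codebase.
import tensorflow.compat.v1 as tf

blocks = [
    resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
    resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
    resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
    resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
images = tf.placeholder(tf.float32, [2, 321, 321, 3])
predictions, end_points = resnet_v2_hed(images, blocks,
                                        is_training=True,
                                        add_v1net_early=True)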