def _test(self, input_shape, block_shape, base_paddings):
    """Check required_space_to_batch_paddings on constant and fed inputs.

    First evaluates the op on constant numpy inputs, requiring the results
    to be statically inferable, and validates them via _checkProperties.
    Then feeds the same values through int32 placeholders and asserts the
    run-time results match the constant-folded ones.
    """
    input_shape = np.asarray(input_shape)
    block_shape = np.asarray(block_shape)
    base_paddings = None if base_paddings is None else np.asarray(base_paddings)

    # Constant-input path: both outputs must constant-fold.
    paddings, crops = tf.required_space_to_batch_paddings(
        input_shape, block_shape, base_paddings)
    paddings_const = tensor_util.constant_value(paddings)
    crops_const = tensor_util.constant_value(crops)
    self.assertIsNotNone(paddings_const)
    self.assertIsNotNone(crops_const)
    self._checkProperties(input_shape, block_shape, base_paddings,
                          paddings_const, crops_const)

    # Placeholder path: same values supplied through a feed dict.
    feed = {}
    shape_ph = tf.placeholder(tf.int32)
    feed[shape_ph] = input_shape
    block_ph = tf.placeholder(tf.int32, [len(block_shape)])
    feed[block_ph] = block_shape
    if base_paddings is None:
        base_ph = None
    else:
        base_ph = tf.placeholder(tf.int32, [len(block_shape), 2])
        feed[base_ph] = base_paddings
    t_paddings, t_crops = tf.required_space_to_batch_paddings(
        shape_ph, block_ph, base_ph)
    with self.test_session():
        paddings_result = t_paddings.eval(feed)
        crops_result = t_crops.eval(feed)
    self.assertAllEqual(paddings_result, paddings_const)
    self.assertAllEqual(crops_result, crops_const)
# Example (Beispiel) #2 — score: 0
def _build_dilated(image, is_training=False):
  """Build a dilated DenseNet-style segmentation graph for `image`.

  Returns a `(logits, aux_logits)` pair where `logits` is bilinearly resized
  to the input image resolution and `aux_logits` holds the remaining maps for
  auxiliary losses.  Relies on module-level configuration (`bn_params`,
  `data_format`, `growth`, `growth_up`, `block_sizes`, `up_sizes`,
  `compression`, `init_func`, `weight_decay`, `height_dim`, `FLAGS`) and on
  helpers (`dense_block`, `transition`, `BNReluConv`, `_pyramid_pooling`,
  `refine`, `image_size`) defined elsewhere in this module.

  Args:
    image: input image tensor; layout presumably matches the module-level
      `data_format` ('NCHW' or 'NHWC') — TODO confirm against callers.
    is_training: Python bool forwarded to batch normalization.
  """
  #image = tf.Print(image, [tf.shape(image)], message='img_shape = ', summarize=10)
  # NOTE(review): mutates the shared module-level bn_params dict in place.
  bn_params['is_training'] = is_training
  with arg_scope([layers.conv2d],
      data_format=data_format, stride=1, padding='SAME', activation_fn=None,
      normalizer_fn=None, normalizer_params=None,
      weights_initializer=init_func, biases_initializer=None,
      weights_regularizer=layers.l2_regularizer(weight_decay)):
    # Stem: 7x7/2 conv + BN + ReLU, then 2x2/2 max-pool.
    with tf.variable_scope('conv0'):
      net = layers.conv2d(image, 2*growth, 7, stride=2)
      #net = layers.conv2d(image, 2*growth, 7, stride=1)
      # TODO
      net = tf.contrib.layers.batch_norm(net, **bn_params)
      net = tf.nn.relu(net)

    net = layers.max_pool2d(net, 2, stride=2, padding='SAME',
                            data_format=data_format, scope='pool0')

    # Each entry: [feature map, up size, growth, scope name], consumed
    # later by refine() during decoding.
    skip_layers = []

    # no diff with double BN from orig densenet, first=True
    net = dense_block(net, block_sizes[0], growth, 'block0', is_training, first=True)
    #net, skip = dense_block(net, block_sizes[0], growth, 'block0', is_training,
    #    first=True, split=True)
    #skip_layers.append([skip, 256, growth_up, 'block0_mid_refine', depth])
    #skip_layers.append([skip, up_sizes[0], growth_up, 'block0_mid_refine'])
    skip_layers.append([net, up_sizes[0], growth_up, 'block0_refine'])
    net, _ = transition(net, compression, 'block0/transition')
    #skip_layers.append([skip, up_sizes[0], growth_up, 'block0_refine'])

    #net, skip = dense_block(net, block_sizes[1], growth, 'block1', is_training, split=True)
    #skip_layers.append([skip, up_sizes[1], growth_up, 'block1_mid_refine'])
    net = dense_block(net, block_sizes[1], growth, 'block1', is_training)
    skip_layers.append([net, up_sizes[1], growth_up, 'block1_refine'])
    net, _ = transition(net, compression, 'block1/transition')
    #skip_layers.append([skip, up_sizes[1], growth_up, 'block1_refine'])

    # works the same with split, not 100%
    #context_pool_num = 3
    #context_pool_num = 4
    context_pool_num = 5
    #net, skip = dense_block(net, block_sizes[2], growth, 'block2', is_training, split=True)
    #skip_layers.append([skip, up_sizes[2], growth_up, 'block2_mid_refine'])
    net = dense_block(net, block_sizes[2], growth, 'block2', is_training)
    #skip_layers.append([net, up_sizes[3], growth_up, 'block2_refine'])
    #skip_layers.append([net, up_sizes[2], growth_up, 'block2_refine'])
    # stride=1 keeps spatial resolution here; block3 below runs on
    # space_to_batch-shifted grids instead of striding further down.
    net, _ = transition(net, compression, 'block2/transition', stride=1)

    # Emulate dilation for block3: space_to_batch with block size 2 moves
    # spatial sub-grids into the batch dimension, and batch_to_space with
    # the matching crops restores the original layout afterwards.
    bsz = 2
    paddings, crops = tf.required_space_to_batch_paddings(image_size(net), [bsz, bsz])
    net = tf.space_to_batch(net, paddings=paddings, block_size=bsz)
    net = dense_block(net, block_sizes[3], growth, 'block3', is_training)
    net = tf.batch_to_space(net, crops=crops, block_size=bsz)
    print('before context = ', net)

    with tf.variable_scope('head'):
      # Context head: 1x1 bottleneck followed by pyramid pooling.
      net = BNReluConv(net, 512, 'bottleneck', k=1)
      net = _pyramid_pooling(net, size=context_pool_num)
      #net = BNReluConv(net, context_size, 'context_conv', k=3)

      print('Before upsampling: ', net)

      # Decoder: refine with the collected skips, deepest first, keeping
      # each intermediate output.
      all_logits = [net]
      for skip_layer in reversed(skip_layers):
        net = refine(net, skip_layer)
        all_logits.append(net)
        print('after upsampling = ', net)

      # Keep only the pre-upsampling and final maps.
      all_logits = [all_logits[0], all_logits[-1]]
      #all_logits = [all_logits[1], all_logits[-1]]
      #all_logits = [all_logits[2], all_logits[-1]]

  # NOTE(review): the 'head' variable scope is opened a second time here,
  # outside the arg_scope — presumably intentional; confirm scope sharing.
  with tf.variable_scope('head'):
    for i, logits in enumerate(all_logits):
      with tf.variable_scope('logits_'+str(i)):
      # FIX
      #net = tf.nn.relu(layers.batch_norm(net, **bn_params))
      #logits = layers.conv2d(net, FLAGS.num_classes, 1, activation_fn=None,
      #                       data_format=data_format)
        # Per-map 1x1 classifier over num_classes channels.
        logits = layers.conv2d(tf.nn.relu(logits), FLAGS.num_classes, 1,
                               activation_fn=None, data_format=data_format)

        # resize_bilinear expects NHWC, so transpose first when needed.
        if data_format == 'NCHW':
          logits = tf.transpose(logits, perm=[0,2,3,1])
        input_shape = tf.shape(image)[height_dim:height_dim+2]
        logits = tf.image.resize_bilinear(logits, input_shape, name='resize_logits')
        all_logits[i] = logits
    # Last element is the main output; the rest are auxiliary.
    logits = all_logits.pop()
    return logits, all_logits
# Example (Beispiel) #3 — score: 0
 def inputter_crop():
   """Return the crops from required_space_to_batch_paddings, cast to float64."""
   crops = tf.required_space_to_batch_paddings(input_shape, block_shape)[1]
   return tf.cast(crops, tf.float64)
# Example (Beispiel) #4 — score: 0
 def inputter_pad():
   """Return the paddings from required_space_to_batch_paddings, cast to float64."""
   paddings = tf.required_space_to_batch_paddings(input_shape, block_shape)[0]
   return tf.cast(paddings, tf.float64)
# Example (Beispiel) #5 — score: 0
 def inputter_crop():
     """Crops for the fed int32 (shape, block, base paddings) inputs as float64."""
     shape_t, block_t, base_t = inputs_int32[0], inputs_int32[1], inputs_int32[2]
     _, crops = tf.required_space_to_batch_paddings(shape_t, block_t,
                                                    base_paddings=base_t)
     return tf.cast(crops, tf.float64)
# Example (Beispiel) #6 — score: 0
 def inputter_pad():
     """Paddings for the fed int32 (shape, block, base paddings) inputs as float64."""
     shape_t, block_t, base_t = inputs_int32[0], inputs_int32[1], inputs_int32[2]
     pads, _ = tf.required_space_to_batch_paddings(shape_t, block_t,
                                                   base_paddings=base_t)
     return tf.cast(pads, tf.float64)