Example #1
 def testAtrousFullyConvolutionalValues(self):
     """Verify dense feature extraction with atrous convolution."""
     nominal_stride = 32
     for output_stride in [4, 8, 16, 32, None]:
         with arg_scope(resnet_utils.resnet_arg_scope()):
             with ops.Graph().as_default():
                 with self.cached_session() as sess:
                     random_seed.set_random_seed(0)
                     inputs = create_test_input(2, 81, 81, 3)
                     # Dense feature extraction followed by subsampling.
                     output, _ = self._resnet_small(
                         inputs,
                         None,
                         is_training=False,
                         global_pool=False,
                         output_stride=output_stride)
                     if output_stride is None:
                         factor = 1
                     else:
                         factor = nominal_stride // output_stride
                     output = resnet_utils.subsample(output, factor)
                     # Make the two networks use the same weights.
                     variable_scope.get_variable_scope().reuse_variables()
                     # Feature extraction at the nominal network rate.
                     expected, _ = self._resnet_small(inputs,
                                                      None,
                                                      is_training=False,
                                                      global_pool=False)
                     sess.run(variables.global_variables_initializer())
                     self.assertAllClose(output.eval(),
                                         expected.eval(),
                                         atol=2e-4,
                                         rtol=1e-4)
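Note: every test in these examples calls a create_test_input helper that the snippets do not show. A minimal sketch (an assumption, not the exact helper from the slim test suite) that matches how the tests use it: a placeholder when a dimension is unknown, otherwise a deterministic constant image.

import numpy as np
import tensorflow as tf

def create_test_input(batch_size, height, width, channels):
    # Return a placeholder when any dimension is unknown, otherwise a
    # deterministic ramp image so value comparisons are reproducible.
    if None in [batch_size, height, width, channels]:
        return tf.placeholder(tf.float32, (batch_size, height, width, channels))
    ramp = (np.reshape(np.arange(height), [height, 1]) +
            np.reshape(np.arange(width), [1, width]))
    ramp = np.reshape(ramp, [1, height, width, 1])
    return tf.constant(np.tile(ramp, [batch_size, 1, 1, channels]),
                       dtype=tf.float32)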
Example #2
 def testEndPointsV1(self):
     """Test the end points of a tiny v1 bottleneck network."""
     blocks = [
         resnet_v1.resnet_v1_block('block1',
                                   base_depth=1,
                                   num_units=2,
                                   stride=2),
         resnet_v1.resnet_v1_block('block2',
                                   base_depth=2,
                                   num_units=2,
                                   stride=1),
     ]
     inputs = create_test_input(2, 32, 16, 3)
     with arg_scope(resnet_utils.resnet_arg_scope()):
         _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
     expected = [
         'tiny/block1/unit_1/bottleneck_v1/shortcut',
         'tiny/block1/unit_1/bottleneck_v1/conv1',
         'tiny/block1/unit_1/bottleneck_v1/conv2',
         'tiny/block1/unit_1/bottleneck_v1/conv3',
         'tiny/block1/unit_2/bottleneck_v1/conv1',
         'tiny/block1/unit_2/bottleneck_v1/conv2',
         'tiny/block1/unit_2/bottleneck_v1/conv3',
         'tiny/block2/unit_1/bottleneck_v1/shortcut',
         'tiny/block2/unit_1/bottleneck_v1/conv1',
         'tiny/block2/unit_1/bottleneck_v1/conv2',
         'tiny/block2/unit_1/bottleneck_v1/conv3',
         'tiny/block2/unit_2/bottleneck_v1/conv1',
         'tiny/block2/unit_2/bottleneck_v1/conv2',
         'tiny/block2/unit_2/bottleneck_v1/conv3'
     ]
     self.assertItemsEqual(expected, end_points)
Example #3
def resnet_inference(inputs, is_train=True):
    x = tf.reshape(inputs, [FLAGS.batch_size, 64, 64, 1])
    # Use the is_train argument instead of the undefined global MODE.
    with slim.arg_scope(resnet_arg_scope(is_training=is_train)):
        net, end_points = resnet_v2.resnet_v2_50(
            x, num_classes=FLAGS.label_size)
    net = tf.reshape(net, [FLAGS.batch_size, FLAGS.label_size])
    return net
Example #4
def PSPNet(inputs, num_classes, is_training):
    '''A TensorFlow implementation of the PSPNet model, based on
       https://github.com/hszhao/PSPNet/tree/master/evaluation/prototxt

    Args:
        inputs: A 4-D tensor with dimensions [batch_size, height, width, channels]
        num_classes: Integer, the total number of categories in the dataset
        is_training: Bool, whether to update the running means and variances during training.
    Returns:
        A score map with dimensions [batch_size, 1/8*height, 1/8*width, num_classes]
    '''
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        net, end_points = resnet_v2.resnet_v2_101(inputs,
                                                  num_classes=None,
                                                  is_training=is_training,
                                                  global_pool=False,
                                                  output_stride=8,
                                                  reuse=None,
                                                  scope='resnet_v2_101')
    with tf.variable_scope("PSPNet"):
        shape = tf.shape(net)[1:3]
        net = PyramidPoolingModule(net, shape=shape)
        net = slim.conv2d(net, 512, [3, 3], activation_fn=None, scope='conv5')
        net = slim.batch_norm(net, fused=True, scope='conv5_bn')
        net = tf.nn.relu(net, name='conv5_bn_relu')
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
        return logits
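PyramidPoolingModule is not defined in this snippet. A plausible sketch of PSPNet-style pyramid pooling, assuming the slim/tf imports used above and static input dimensions (bin sizes 1, 2, 3, 6 follow the paper):

def PyramidPoolingModule(inputs, shape, depth=512, bins=(1, 2, 3, 6)):
    # Average-pool into b x b grids, project each branch with a 1x1 conv,
    # upsample back to `shape`, and concatenate with the input.
    height, width = inputs.get_shape().as_list()[1:3]  # assumes static dims
    branches = [inputs]
    for b in bins:
        kernel = [height // b, width // b]
        pooled = slim.avg_pool2d(inputs, kernel, stride=kernel)
        pooled = slim.conv2d(pooled, depth, [1, 1], scope='ppm_conv%d' % b)
        branches.append(tf.image.resize_bilinear(pooled, shape))
    return tf.concat(branches, axis=3)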
Example #5
 def testEndPointsV1(self):
   """Test the end points of a tiny v1 bottleneck network."""
   bottleneck = resnet_v1.bottleneck
   blocks = [
       resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
       resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])
   ]
   inputs = create_test_input(2, 32, 16, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
   expected = [
       'tiny/block1/unit_1/bottleneck_v1/shortcut',
       'tiny/block1/unit_1/bottleneck_v1/shortcut/BatchNorm',
       'tiny/block1/unit_1/bottleneck_v1/conv1',
       'tiny/block1/unit_1/bottleneck_v1/conv2',
       'tiny/block1/unit_1/bottleneck_v1/conv3',
       'tiny/block1/unit_1/bottleneck_v1/conv3/BatchNorm',
       'tiny/block1/unit_2/bottleneck_v1/conv1',
       'tiny/block1/unit_2/bottleneck_v1/conv2',
       'tiny/block1/unit_2/bottleneck_v1/conv3',
       'tiny/block1/unit_2/bottleneck_v1/conv3/BatchNorm',
       'tiny/block2/unit_1/bottleneck_v1/shortcut',
       'tiny/block2/unit_1/bottleneck_v1/shortcut/BatchNorm',
       'tiny/block2/unit_1/bottleneck_v1/conv1',
       'tiny/block2/unit_1/bottleneck_v1/conv2',
       'tiny/block2/unit_1/bottleneck_v1/conv3',
       'tiny/block2/unit_1/bottleneck_v1/conv3/BatchNorm',
       'tiny/block2/unit_2/bottleneck_v1/conv1',
       'tiny/block2/unit_2/bottleneck_v1/conv2',
       'tiny/block2/unit_2/bottleneck_v1/conv3',
       'tiny/block2/unit_2/bottleneck_v1/conv3/BatchNorm'
   ]
   self.assertItemsEqual(expected, end_points)
Example #6
def extract_features(inputs, contexts, is_training):

    # TODO - Add skip connections between conv-deconv layers
    with slim.arg_scope(resnet_utils.resnet_arg_scope(is_training=is_training)):

        conv1 = tf.layers.conv2d(inputs, filters=128, kernel_size=3, strides=1, padding='same')

        # height/width were undefined in the original; derive them from the input.
        height, width = inputs.get_shape().as_list()[1:3]
        context_layer = expand_context(contexts, height, width)

        contexted_batch = tf.concat([conv1, context_layer], axis=3)

        net, end_points = resnet_v2.resnet_v2_101(contexted_batch,
                                                  None,
                                                  global_pool=False,
                                                  output_stride=16)

        deconv1 = deconv_block(net, num_inputs=2048, num_outputs=1024,
                               is_training=is_training, scope='deconv1')

        deconv2 = deconv_block(deconv1, num_inputs=1024, num_outputs=512,
                               stride=2, is_training=is_training, scope='deconv2')

        deconv3 = deconv_block(deconv2, num_inputs=512, num_outputs=256,
                               stride=2, is_training=is_training, scope='deconv3')

        deconv4 = deconv_block(deconv3, num_inputs=256, num_outputs=128,
                               stride=2, is_training=is_training, scope='deconv4')

        deconv5 = deconv_block(deconv4, num_inputs=128, num_outputs=64,
                               stride=2, is_training=is_training, scope='deconv5')

    return deconv5, net
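deconv_block and expand_context are external helpers. A minimal sketch of deconv_block consistent with the call sites above (transposed conv, batch norm, ReLU; this is an assumption, not the original helper):

def deconv_block(inputs, num_inputs, num_outputs, stride=1,
                 is_training=True, scope=None):
    with tf.variable_scope(scope, 'deconv_block'):
        # num_inputs only sanity-checks the incoming channel depth.
        assert inputs.get_shape().as_list()[-1] == num_inputs
        net = slim.conv2d_transpose(inputs, num_outputs, [3, 3],
                                    stride=stride, activation_fn=None)
        net = slim.batch_norm(net, is_training=is_training)
        return tf.nn.relu(net)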
Example #7
 def testEndPointsV2(self):
   """Test the end points of a tiny v2 bottleneck network."""
   blocks = [
       resnet_v2.resnet_v2_block(
           'block1', base_depth=1, num_units=2, stride=2),
       resnet_v2.resnet_v2_block(
           'block2', base_depth=2, num_units=2, stride=1),
   ]
   inputs = create_test_input(2, 32, 16, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
   expected = [
       'tiny/block1/unit_1/bottleneck_v2/shortcut',
       'tiny/block1/unit_1/bottleneck_v2/conv1',
       'tiny/block1/unit_1/bottleneck_v2/conv2',
       'tiny/block1/unit_1/bottleneck_v2/conv3',
       'tiny/block1/unit_2/bottleneck_v2/conv1',
       'tiny/block1/unit_2/bottleneck_v2/conv2',
       'tiny/block1/unit_2/bottleneck_v2/conv3',
       'tiny/block2/unit_1/bottleneck_v2/shortcut',
       'tiny/block2/unit_1/bottleneck_v2/conv1',
       'tiny/block2/unit_1/bottleneck_v2/conv2',
       'tiny/block2/unit_1/bottleneck_v2/conv3',
       'tiny/block2/unit_2/bottleneck_v2/conv1',
       'tiny/block2/unit_2/bottleneck_v2/conv2',
       'tiny/block2/unit_2/bottleneck_v2/conv3'
   ]
   self.assertItemsEqual(expected, end_points)
Example #8
 def _encoder(self, input_images, scope_name="encoder", trainable=True, scope_reuse=False):
     with arg_scope(resnet_utils.resnet_arg_scope()):
         # (256, 256, 2048) ==> (32, 32, 2048)
         output, end_points = resnet_v2.resnet_v2_50(input_images, output_stride=8,
                                                     global_pool=False, reuse=scope_reuse)
         # (32, 32, 2048) ==> (32, 32, self.lstm_channel)
         hidden_state = decoder_layer(output, out_channels=self.lstm_channel, stride=1,
                                      scope_name='encoder_layer1', trainable=trainable)
         print(hidden_state.get_shape())
         tf.summary.histogram(hidden_state.op.name + "/activation", hidden_state)
         return hidden_state
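decoder_layer is defined elsewhere; judging by the shape comments (2048 channels projected down to self.lstm_channel), a hypothetical minimal version is a single conv projection:

def decoder_layer(inputs, out_channels, stride=1, scope_name=None,
                  trainable=True):
    with tf.variable_scope(scope_name):
        return slim.conv2d(inputs, out_channels, [3, 3], stride=stride,
                           trainable=trainable)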
Example #9
 def testAtrousFullyConvolutionalValues(self):
   """Verify dense feature extraction with atrous convolution."""
   nominal_stride = 32
   for output_stride in [4, 8, 16, 32, None]:
     with arg_scope(resnet_utils.resnet_arg_scope()):
       with ops.Graph().as_default():
         with self.test_session() as sess:
           random_seed.set_random_seed(0)
           inputs = create_test_input(2, 81, 81, 3)
           # Dense feature extraction followed by subsampling.
           output, _ = self._resnet_small(
               inputs,
               None,
               is_training=False,
               global_pool=False,
               output_stride=output_stride)
           if output_stride is None:
             factor = 1
           else:
             factor = nominal_stride // output_stride
           output = resnet_utils.subsample(output, factor)
           # Make the two networks use the same weights.
           variable_scope.get_variable_scope().reuse_variables()
           # Feature extraction at the nominal network rate.
           expected, _ = self._resnet_small(
               inputs, None, is_training=False, global_pool=False)
           sess.run(variables.global_variables_initializer())
           self.assertAllClose(
               output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)
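For reference, resnet_utils.subsample used above reduces the spatial rate with a stride-factor 1x1 max pool and is a no-op for factor 1; its implementation is essentially:

def subsample(inputs, factor, scope=None):
    if factor == 1:
        return inputs
    return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)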
Example #10
def Deeplab_v2(inputs, num_classes, is_training):
    '''A TensorFlow implementation of the Deeplab_v2 model, based on
       http://liangchiehchen.com/projects/DeepLabv2_resnet.html

    Args:
        inputs: A 4-D tensor with dimensions [batch_size, height, width, channels]
        num_classes: Integer, the total number of categories in the dataset
        is_training: Bool, whether to update the running means and variances during training.
    Returns:
        A score map with dimensions [batch_size, 1/8*height, 1/8*width, num_classes]
    '''
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        net, _ = resnet_v2.resnet_v2_101(inputs,
                                         num_classes=None,
                                         is_training=is_training,
                                         global_pool=False,
                                         output_stride=8,
                                         reuse=None,
                                         scope='resnet_v2_101')

    # ASPP module without BN layers
    with tf.variable_scope('Deeplab_v2'):
        pool6 = slim.conv2d(net, num_classes, [3, 3], rate=6, activation_fn=None, scope='pool6')
        pool12 = slim.conv2d(net, num_classes, [3, 3], rate=12, activation_fn=None, scope='pool12')
        pool18 = slim.conv2d(net, num_classes, [3, 3], rate=18, activation_fn=None, scope='pool18')
        pool24 = slim.conv2d(net, num_classes, [3, 3], rate=24, activation_fn=None, scope='pool24')
        logits = tf.add_n([pool6, pool12, pool18, pool24], name='logits')
        return logits
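The returned score map is at 1/8 of the input resolution, so a typical caller (hypothetical names and shapes) upsamples before computing a loss:

images = tf.placeholder(tf.float32, [4, 321, 321, 3])
logits = Deeplab_v2(images, num_classes=21, is_training=True)
logits = tf.image.resize_bilinear(logits, tf.shape(images)[1:3])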
Example #11
 def testEndPointsV2(self):
   """Test the end points of a tiny v2 bottleneck network."""
   bottleneck = resnet_v2.bottleneck
   blocks = [
       resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
       resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])
   ]
   inputs = create_test_input(2, 32, 16, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
   expected = [
       'tiny/block1/unit_1/bottleneck_v2/shortcut',
       'tiny/block1/unit_1/bottleneck_v2/conv1',
       'tiny/block1/unit_1/bottleneck_v2/conv2',
       'tiny/block1/unit_1/bottleneck_v2/conv3',
       'tiny/block1/unit_2/bottleneck_v2/conv1',
       'tiny/block1/unit_2/bottleneck_v2/conv2',
       'tiny/block1/unit_2/bottleneck_v2/conv3',
       'tiny/block2/unit_1/bottleneck_v2/shortcut',
       'tiny/block2/unit_1/bottleneck_v2/conv1',
       'tiny/block2/unit_1/bottleneck_v2/conv2',
       'tiny/block2/unit_1/bottleneck_v2/conv3',
       'tiny/block2/unit_2/bottleneck_v2/conv1',
       'tiny/block2/unit_2/bottleneck_v2/conv2',
       'tiny/block2/unit_2/bottleneck_v2/conv3'
   ]
   self.assertItemsEqual(expected, end_points)
Example #12
 def testClassificationEndPoints(self):
   global_pool = True
   num_classes = 10
   inputs = create_test_input(2, 224, 224, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     logits, end_points = self._resnet_small(
         inputs, num_classes, global_pool, scope='resnet')
   self.assertTrue(logits.op.name.startswith('resnet/logits'))
   self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
   self.assertTrue('predictions' in end_points)
   self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                        [2, 1, 1, num_classes])
Example #13
 def testClassificationEndPoints(self):
   global_pool = True
   num_classes = 10
   inputs = create_test_input(2, 224, 224, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     logits, end_points = self._resnet_small(
         inputs, num_classes, global_pool=global_pool, scope='resnet')
   self.assertTrue(logits.op.name.startswith('resnet/logits'))
   self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
   self.assertTrue('predictions' in end_points)
   self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                        [2, 1, 1, num_classes])
Example #14
 def testFullyConvolutionalUnknownHeightWidth(self):
   batch = 2
   height, width = 65, 65
   global_pool = False
   inputs = create_test_input(batch, None, None, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     output, _ = self._resnet_small(inputs, None, global_pool)
   self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
   images = create_test_input(batch, height, width, 3)
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
     output = sess.run(output, {inputs: images.eval()})
     self.assertEqual(output.shape, (batch, 3, 3, 32))
Example #15
 def testFullyConvolutionalUnknownHeightWidth(self):
   batch = 2
   height, width = 65, 65
   global_pool = False
   inputs = create_test_input(batch, None, None, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
   self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
   images = create_test_input(batch, height, width, 3)
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
     output = sess.run(output, {inputs: images.eval()})
     self.assertEqual(output.shape, (batch, 3, 3, 32))
Example #16
def create_resnet(image_input,
                  is_training,
                  scope="",
                  resnet_out="block4",
                  resnet_version=50,
                  cbn=None):
    """
    Create a resnet by overidding the classic batchnorm with conditional batchnorm
    :param image_input: placeholder with image
    :param is_training: are you using the resnet at training_time or test_time
    :param scope: tensorflow scope
    :param resnet_version: 50/101/152
    :param cbn: the cbn factory
    :return: the resnet output
    """

    if cbn is None:
        # assert False, "\n" \
        #               "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \
        #               "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}"
        arg_sc = slim_utils.resnet_arg_scope(is_training=is_training)
    else:
        arg_sc = get_resnet_arg_scope(cbn.apply)

    # Pick the correct version of the resnet
    if resnet_version == 50:
        current_resnet = resnet_v1.resnet_v1_50
    elif resnet_version == 101:
        current_resnet = resnet_v1.resnet_v1_101
    elif resnet_version == 152:
        current_resnet = resnet_v1.resnet_v1_152
    else:
        raise ValueError("Unsupported resnet version")

    resnet_scope = os.path.join('resnet_v1_{}/'.format(resnet_version),
                                resnet_out)

    with slim.arg_scope(arg_sc):
        net, end_points = current_resnet(
            image_input, 1000)  # 1000 is the number of softmax classes

    if len(scope) > 0 and not scope.endswith("/"):
        scope += "/"

    print("Use: {}".format(resnet_scope))
    out = end_points[scope + resnet_scope]

    return out
Example #17
 def testFullyConvolutionalEndpointShapes(self):
   global_pool = False
   num_classes = 10
   inputs = create_test_input(2, 321, 321, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_small(
         inputs, num_classes, global_pool=global_pool, scope='resnet')
     endpoint_to_shape = {
         'resnet/block1': [2, 41, 41, 4],
         'resnet/block2': [2, 21, 21, 8],
         'resnet/block3': [2, 11, 11, 16],
         'resnet/block4': [2, 11, 11, 32]
     }
     for endpoint in endpoint_to_shape:
       shape = endpoint_to_shape[endpoint]
       self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #18
 def testClassificationShapes(self):
   global_pool = True
   num_classes = 10
   inputs = create_test_input(2, 224, 224, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_small(
         inputs, num_classes, global_pool=global_pool, scope='resnet')
     endpoint_to_shape = {
         'resnet/block1': [2, 28, 28, 4],
         'resnet/block2': [2, 14, 14, 8],
         'resnet/block3': [2, 7, 7, 16],
         'resnet/block4': [2, 7, 7, 32]
     }
     for endpoint in endpoint_to_shape:
       shape = endpoint_to_shape[endpoint]
       self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #19
 def testFullyConvolutionalEndpointShapes(self):
   global_pool = False
   num_classes = 10
   inputs = create_test_input(2, 321, 321, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_small(
         inputs, num_classes, global_pool, scope='resnet')
     endpoint_to_shape = {
         'resnet/block1': [2, 41, 41, 4],
         'resnet/block2': [2, 21, 21, 8],
         'resnet/block3': [2, 11, 11, 16],
         'resnet/block4': [2, 11, 11, 32]
     }
     for endpoint in endpoint_to_shape:
       shape = endpoint_to_shape[endpoint]
       self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #20
 def testClassificationShapes(self):
   global_pool = True
   num_classes = 10
   inputs = create_test_input(2, 224, 224, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_small(
         inputs, num_classes, global_pool, scope='resnet')
     endpoint_to_shape = {
         'resnet/block1': [2, 28, 28, 4],
         'resnet/block2': [2, 14, 14, 8],
         'resnet/block3': [2, 7, 7, 16],
         'resnet/block4': [2, 7, 7, 32]
     }
     for endpoint in endpoint_to_shape:
       shape = endpoint_to_shape[endpoint]
       self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #21
    def testAtrousValuesBottleneck(self):
        """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.
    """
        block = resnet_v1.resnet_v1_block
        blocks = [
            block('block1', base_depth=1, num_units=2, stride=2),
            block('block2', base_depth=2, num_units=2, stride=2),
            block('block3', base_depth=4, num_units=2, stride=2),
            block('block4', base_depth=8, num_units=2, stride=1),
        ]
        nominal_stride = 8

        # Test both odd and even input dimensions.
        height = 30
        width = 31
        with arg_scope(resnet_utils.resnet_arg_scope()):
            with arg_scope([layers.batch_norm], is_training=False):
                for output_stride in [1, 2, 4, 8, None]:
                    with ops.Graph().as_default():
                        with self.cached_session() as sess:
                            random_seed.set_random_seed(0)
                            inputs = create_test_input(1, height, width, 3)
                            # Dense feature extraction followed by subsampling.
                            output = resnet_utils.stack_blocks_dense(
                                inputs, blocks, output_stride)
                            if output_stride is None:
                                factor = 1
                            else:
                                factor = nominal_stride // output_stride

                            output = resnet_utils.subsample(output, factor)
                            # Make the two networks use the same weights.
                            variable_scope.get_variable_scope(
                            ).reuse_variables()
                            # Feature extraction at the nominal network rate.
                            expected = self._stack_blocks_nondense(
                                inputs, blocks)
                            sess.run(variables.global_variables_initializer())
                            output, expected = sess.run([output, expected])
                            self.assertAllClose(output,
                                                expected,
                                                atol=1e-4,
                                                rtol=1e-4)
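The reference path self._stack_blocks_nondense is not shown. In the slim tests it simply runs every unit at rate 1 (no dilation) so the features sit at the nominal stride; a sketch assuming dict-style unit args as produced by resnet_v1_block:

def _stack_blocks_nondense(self, net, blocks):
    # Feature extraction at the nominal network rate: apply every unit
    # with its configured stride and no atrous rate.
    for block in blocks:
        with variable_scope.variable_scope(block.scope, 'block', [net]):
            for i, unit in enumerate(block.args):
                with variable_scope.variable_scope(
                        'unit_%d' % (i + 1), values=[net]):
                    net = block.unit_fn(net, rate=1, **unit)
    return net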
Example #22
def create_resnet(image_input,
                  is_training,
                  scope="",
                  resnet_out="block4",
                  resnet_version=50,
                  cbn=None):
    """
    Create a resnet by overidding the classic batchnorm with conditional batchnorm
    :param image_input: placeholder with image
    :param is_training: are you using the resnet at training_time or test_time
    :param scope: tensorflow scope
    :param resnet_version: 50/101/152
    :param cbn: the cbn factory
    :return: the resnet output
    """

    if cbn is None:
        arg_sc = slim_utils.resnet_arg_scope()
    else:
        arg_sc = get_resnet_arg_scope(cbn.apply)

    # Pick the correct version of the resnet
    if resnet_version == 50:
        current_resnet = resnet_v1.resnet_v1_50
    elif resnet_version == 101:
        current_resnet = resnet_v1.resnet_v1_101
    elif resnet_version == 152:
        current_resnet = resnet_v1.resnet_v1_152
    else:
        raise ValueError("Unsupported resnet version")

    resnet_scope = os.path.join('resnet_v1_{}/'.format(resnet_version),
                                resnet_out)

    with slim.arg_scope(arg_sc):
        net, end_points = current_resnet(
            image_input, 1000,
            is_training=is_training)  # 1000 is the number of softmax classes

    if len(scope) > 0 and not scope.endswith("/"):
        scope += "/"

    print("Use: {}".format(resnet_scope))
    out = end_points[scope + resnet_scope]

    return out
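A hypothetical call site, grabbing the block4 feature map of a ResNet-101 at test time:

images = tf.placeholder(tf.float32, [32, 224, 224, 3])
features = create_resnet(images, is_training=False,
                         resnet_out="block4", resnet_version=101)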
Example #23
    def _atrousValues(self, bottleneck):
        """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.

    Args:
      bottleneck: The bottleneck function.
    """
        blocks = [
            resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
            resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]),
            resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]),
            resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])
        ]
        nominal_stride = 8

        # Test both odd and even input dimensions.
        height = 30
        width = 31
        with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
            for output_stride in [1, 2, 4, 8, None]:
                with ops.Graph().as_default():
                    with self.test_session() as sess:
                        random_seed.set_random_seed(0)
                        inputs = create_test_input(1, height, width, 3)
                        # Dense feature extraction followed by subsampling.
                        output = resnet_utils.stack_blocks_dense(
                            inputs, blocks, output_stride)
                        if output_stride is None:
                            factor = 1
                        else:
                            factor = nominal_stride // output_stride

                        output = resnet_utils.subsample(output, factor)
                        # Make the two networks use the same weights.
                        variable_scope.get_variable_scope().reuse_variables()
                        # Feature extraction at the nominal network rate.
                        expected = self._stack_blocks_nondense(inputs, blocks)
                        sess.run(variables.global_variables_initializer())
                        output, expected = sess.run([output, expected])
                        self.assertAllClose(output,
                                            expected,
                                            atol=1e-4,
                                            rtol=1e-4)
Example #24
 def testUnknownBatchSize(self):
   batch = 2
   height, width = 65, 65
   global_pool = True
   num_classes = 10
   inputs = create_test_input(None, height, width, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     logits, _ = self._resnet_small(
         inputs, num_classes, global_pool, scope='resnet')
   self.assertTrue(logits.op.name.startswith('resnet/logits'))
   self.assertListEqual(logits.get_shape().as_list(),
                        [None, 1, 1, num_classes])
   images = create_test_input(batch, height, width, 3)
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
     output = sess.run(logits, {inputs: images.eval()})
     self.assertEqual(output.shape, (batch, 1, 1, num_classes))
Example #25
def slim_resnet_v2_50(net_in,
                      num_classes=1000,
                      weight_decay=0.0001,
                      is_training=False,
                      reuse=None):
    net_in = tl.layers.InputLayer(net_in, name='input_layer')
    with slim.arg_scope(
            resnet_arg_scope(is_training=is_training,
                             weight_decay=weight_decay)):
        network = tl.layers.SlimNetsLayer(
            layer=net_in,
            slim_layer=resnet_v2_50,
            slim_args={
                'num_classes': num_classes,
            },
            name='resnet_v2_50')  # same with the ckpt mode
    return network
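Hypothetical usage of the wrapper (TensorLayer 1.x API assumed), taking a softmax over the wrapped network's outputs:

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
network = slim_resnet_v2_50(x, num_classes=1000, is_training=False)
probs = tf.nn.softmax(network.outputs)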
Example #26
 def testUnknownBatchSize(self):
   batch = 2
   height, width = 65, 65
   global_pool = True
   num_classes = 10
   inputs = create_test_input(None, height, width, 3)
   with arg_scope(resnet_utils.resnet_arg_scope()):
     logits, _ = self._resnet_small(
         inputs, num_classes, global_pool=global_pool, scope='resnet')
   self.assertTrue(logits.op.name.startswith('resnet/logits'))
   self.assertListEqual(logits.get_shape().as_list(),
                        [None, 1, 1, num_classes])
   images = create_test_input(batch, height, width, 3)
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
     output = sess.run(logits, {inputs: images.eval()})
     self.assertEqual(output.shape, (batch, 1, 1, num_classes))
Example #27
  def _atrousValues(self, bottleneck):
    """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.

    Args:
      bottleneck: The bottleneck function.
    """
    blocks = [
        resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
        resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]),
        resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]),
        resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])
    ]
    nominal_stride = 8

    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      for output_stride in [1, 2, 4, 8, None]:
        with ops.Graph().as_default():
          with self.test_session() as sess:
            random_seed.set_random_seed(0)
            inputs = create_test_input(1, height, width, 3)
            # Dense feature extraction followed by subsampling.
            output = resnet_utils.stack_blocks_dense(inputs, blocks,
                                                     output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride

            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            variable_scope.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected = self._stack_blocks_nondense(inputs, blocks)
            sess.run(variables.global_variables_initializer())
            output, expected = sess.run([output, expected])
            self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
Example #28
  def testAtrousValuesBottleneck(self):
    """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.
    """
    block = resnet_v2.resnet_v2_block
    blocks = [
        block('block1', base_depth=1, num_units=2, stride=2),
        block('block2', base_depth=2, num_units=2, stride=2),
        block('block3', base_depth=4, num_units=2, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    nominal_stride = 8

    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with arg_scope(resnet_utils.resnet_arg_scope()):
      with arg_scope([layers.batch_norm], is_training=False):
        for output_stride in [1, 2, 4, 8, None]:
          with ops.Graph().as_default():
            with self.test_session() as sess:
              random_seed.set_random_seed(0)
              inputs = create_test_input(1, height, width, 3)
              # Dense feature extraction followed by subsampling.
              output = resnet_utils.stack_blocks_dense(inputs, blocks,
                                                       output_stride)
              if output_stride is None:
                factor = 1
              else:
                factor = nominal_stride // output_stride

              output = resnet_utils.subsample(output, factor)
              # Make the two networks use the same weights.
              variable_scope.get_variable_scope().reuse_variables()
              # Feature extraction at the nominal network rate.
              expected = self._stack_blocks_nondense(inputs, blocks)
              sess.run(variables.global_variables_initializer())
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
Example #29
    assert False, "Invalid mode: {}".format(args.mode)


# Define the output folder
out_file = "gw_{mode}_{network}_{feature_name}_{size}".format(
    mode=args.mode, network=args.network, feature_name=args.feature_name, size=args.img_size)


print("Create networks...")
if args.network == "resnet":
    ft_output = resnet.create_resnet(images,
                                     resnet_out=args.feature_name,
                                     resnet_version=args.resnet_version,
                                     is_training=False)
    # create network
    with slim.arg_scope(slim_utils.resnet_arg_scope(is_training=False)):
        _, end_points = resnet_v1.resnet_v1_152(images, 1000)  # 1000 is the number of softmax class


elif args.network == "vgg":
    _, end_points = vgg.vgg_16(images, is_training=False, dropout_keep_prob=1.0)
    ft_name = os.path.join("vgg_16", args.feature_name)
    ft_output = end_points[ft_name]
else:
    assert False, "Incorrect Network"


extract_features(
    img_input = images,
    ft_output = ft_output,
    dataset_cstor = dataset_cstor,
Example #30
def GCN(inputs, num_classes, is_training):
    '''A TensorFlow implementation of the GCN model, based on
       "Large Kernel Matters -- Improve Semantic Segmentation by Global Convolutional Network"

    Args:
        inputs: A 4-D tensor with dimensions [batch_size, height, width, channels]
        num_classes: Integer, the total number of categories in the dataset
        is_training: Bool, whether to update the running means and variances during training.
    Returns:
        A score map with dimensions [batch_size, height, width, num_classes]
    '''
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        net, end_points = resnet_v2.resnet_v2_101(inputs,
                                                  num_classes=None,
                                                  is_training=is_training,
                                                  global_pool=False,
                                                  output_stride=None,
                                                  reuse=None,
                                                  scope='resnet_v2_101')
    block1 = end_points[
        "resnet_v2_101/block1/unit_2/bottleneck_v2"]  # (56, 56, 256)
    block2 = end_points[
        "resnet_v2_101/block2/unit_3/bottleneck_v2"]  # (28, 28, 512)
    block3 = end_points[
        "resnet_v2_101/block3/unit_22/bottleneck_v2"]  # (14, 14, 1024)
    block4 = net  # (7, 7, 2048)

    with tf.variable_scope("gcn") as sc:
        down5 = GlobalConvBlock(block4, num_classes=num_classes)
        down5 = BoundaryRefinementBlock(down5,
                                        num_classes=num_classes,
                                        kernel_size=[3, 3])
        down5 = slim.conv2d_transpose(down5,
                                      num_classes,
                                      kernel_size=[4, 4],
                                      stride=2,
                                      activation_fn=None)  # (14, 14, 21)

        down4 = GlobalConvBlock(block3, num_classes=num_classes)
        down4 = BoundaryRefinementBlock(down4,
                                        num_classes=num_classes,
                                        kernel_size=[3, 3])
        down4 = tf.add(down4, down5)
        down4 = BoundaryRefinementBlock(down4,
                                        num_classes=num_classes,
                                        kernel_size=[3, 3])
        down4 = slim.conv2d_transpose(down4,
                                      num_classes,
                                      kernel_size=[4, 4],
                                      stride=2,
                                      activation_fn=None)  # (28, 28, 21)

        down3 = GlobalConvBlock(block2, num_classes=num_classes)
        down3 = BoundaryRefinementBlock(down3,
                                        num_classes=num_classes,
                                        kernel_size=[3, 3])
        down3 = tf.add(down3, down4)
        down3 = BoundaryRefinementBlock(down3,
                                        num_classes=num_classes,
                                        kernel_size=[3, 3])
        down3 = slim.conv2d_transpose(down3,
                                      num_classes,
                                      kernel_size=[4, 4],
                                      stride=2,
                                      activation_fn=None)  # (56, 56, 21)

        down2 = GlobalConvBlock(block1, num_classes=num_classes)
        down2 = BoundaryRefinementBlock(down2,
                                        num_classes=num_classes,
                                        kernel_size=[3, 3])
        down2 = tf.add(down2, down3)
        down2 = BoundaryRefinementBlock(down2,
                                        num_classes=num_classes,
                                        kernel_size=[3, 3])
        down2 = slim.conv2d_transpose(down2,
                                      num_classes,
                                      kernel_size=[4, 4],
                                      stride=2,
                                      activation_fn=None)  # (112, 112, 21)

        output = BoundaryRefinementBlock(down2,
                                         num_classes=num_classes,
                                         kernel_size=[3, 3])
        output = slim.conv2d_transpose(output,
                                       num_classes,
                                       kernel_size=[4, 4],
                                       stride=2,
                                       activation_fn=None)  # (224, 224, 21)
        output = BoundaryRefinementBlock(output,
                                         num_classes=num_classes,
                                         kernel_size=[3, 3])
        return output
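GlobalConvBlock and BoundaryRefinementBlock follow the building blocks of the "Large Kernel Matters" paper. Plausible sketches (the kernel size k=15 comes from the paper, not this snippet):

def GlobalConvBlock(inputs, num_classes, k=15):
    # Two separable large-kernel branches, k x 1 then 1 x k and the
    # transpose, summed.
    left = slim.conv2d(inputs, num_classes, [k, 1], activation_fn=None)
    left = slim.conv2d(left, num_classes, [1, k], activation_fn=None)
    right = slim.conv2d(inputs, num_classes, [1, k], activation_fn=None)
    right = slim.conv2d(right, num_classes, [k, 1], activation_fn=None)
    return tf.add(left, right)

def BoundaryRefinementBlock(inputs, num_classes, kernel_size):
    # Residual refinement: conv -> ReLU -> conv, added back onto the input.
    net = slim.conv2d(inputs, num_classes, kernel_size)  # ReLU by default
    net = slim.conv2d(net, num_classes, kernel_size, activation_fn=None)
    return tf.add(inputs, net)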
Example #31
def create_resnet(image_input,
                  is_training,
                  scope="",
                  scope_feature="",
                  resnet_out="logits",
                  resnet_version=50,
                  cbn=None):
    """
    Create a resnet by overidding the classic batchnorm with conditional batchnorm
    :param image_input: placeholder with image
    :param is_training: are you using the resnet at training_time or test_time
    :param scope: tensorflow scope
    :param resnet_version: 50/101/152
    :param cbn: the cbn factory
    :return: the resnet output
    """

    # print("resnet_out = {}".format(resnet_out))

    if cbn is None:
        # assert False, "\n" \
        #               "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \
        #               "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}"
        # arg_sc = slim_utils.resnet_arg_scope(is_training=is_training)

        arg_sc = slim_utils.resnet_arg_scope()
        # print("arg_sc = {}".format(arg_sc))
    else:
        arg_sc = get_resnet_arg_scope(cbn.apply)

    # Pick the correct version of the resnet
    if resnet_version == 50:
        print("------ 50")
        current_resnet = resnet_v1.resnet_v1_50
    elif resnet_version == 101:
        print("------ 101")
        current_resnet = resnet_v1.resnet_v1_101
    elif resnet_version == 152:
        print("------ 152")
        current_resnet = resnet_v1.resnet_v1_152
    else:
        raise ValueError("Unsupported resnet version")

    resnet_scope = os.path.join('resnet_v1_{}/'.format(resnet_version),
                                resnet_out)
    # print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope))

    # exit()
    # print("current_resnet = {}".format(current_resnet))
    # exit()

    with slim.arg_scope(arg_sc):
        # 1000 is the number of softmax classes; the scope must match
        # resnet_scope above (the original hard-coded 'resnet_v1_50').
        net, end_points = current_resnet(
            image_input, 1000,
            scope='resnet_v1_{}'.format(resnet_version))

    print("net = {}, end_points = {}".format(net, end_points))

    # print(" resnet | endpoint=",end_points)
    if len(scope) > 0 and not scope.endswith("/"):
        scope += "/"

    # e.g. Tensor("oracle/resnet_v1_50/block4/unit_3/bottleneck_v1/Relu:0",
    #      shape=(32, 7, 7, 2048), dtype=float32)
    out = end_points[scope + resnet_scope]

    # print("------------------------- out Use: {},output = {}".format(resnet_scope,out))
    # out = tf.reshape(
    # out,
    # [-1,out.shape[3]],
    # )

    # print("-- net = {}, end_points={},out={} ".format(net,end_points,out))
    # exit()

    return out, end_points
Example #32
 def arg_scope(self, weight_decay=0.0005, is_training=None):
     # Pass the argument through instead of hard-coding is_training=None.
     return resnet_utils.resnet_arg_scope(weight_decay=weight_decay,
                                          is_training=is_training)
Example #33
    def _testDeprecatingIsTraining(self, network_fn):
        batch_norm_fn = layers.batch_norm

        @add_arg_scope
        def batch_norm_expect_is_training(*args, **kwargs):
            assert kwargs['is_training']
            return batch_norm_fn(*args, **kwargs)

        @add_arg_scope
        def batch_norm_expect_is_not_training(*args, **kwargs):
            assert not kwargs['is_training']
            return batch_norm_fn(*args, **kwargs)

        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)

        # Default argument for resnet_arg_scope
        layers.batch_norm = batch_norm_expect_is_training
        with arg_scope(resnet_utils.resnet_arg_scope()):
            network_fn(inputs,
                       num_classes,
                       global_pool=global_pool,
                       scope='resnet1')

        layers.batch_norm = batch_norm_expect_is_training
        with arg_scope(resnet_utils.resnet_arg_scope()):
            network_fn(inputs,
                       num_classes,
                       is_training=True,
                       global_pool=global_pool,
                       scope='resnet2')

        layers.batch_norm = batch_norm_expect_is_not_training
        with arg_scope(resnet_utils.resnet_arg_scope()):
            network_fn(inputs,
                       num_classes,
                       is_training=False,
                       global_pool=global_pool,
                       scope='resnet3')

        # resnet_arg_scope with is_training set to True (deprecated)
        layers.batch_norm = batch_norm_expect_is_training
        with arg_scope(resnet_utils.resnet_arg_scope(is_training=True)):
            network_fn(inputs,
                       num_classes,
                       global_pool=global_pool,
                       scope='resnet4')

        layers.batch_norm = batch_norm_expect_is_training
        with arg_scope(resnet_utils.resnet_arg_scope(is_training=True)):
            network_fn(inputs,
                       num_classes,
                       is_training=True,
                       global_pool=global_pool,
                       scope='resnet5')

        layers.batch_norm = batch_norm_expect_is_not_training
        with arg_scope(resnet_utils.resnet_arg_scope(is_training=True)):
            network_fn(inputs,
                       num_classes,
                       is_training=False,
                       global_pool=global_pool,
                       scope='resnet6')

        # resnet_arg_scope with is_training set to False (deprecated)
        layers.batch_norm = batch_norm_expect_is_not_training
        with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
            network_fn(inputs,
                       num_classes,
                       global_pool=global_pool,
                       scope='resnet7')

        layers.batch_norm = batch_norm_expect_is_training
        with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
            network_fn(inputs,
                       num_classes,
                       is_training=True,
                       global_pool=global_pool,
                       scope='resnet8')

        layers.batch_norm = batch_norm_expect_is_not_training
        with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
            network_fn(inputs,
                       num_classes,
                       is_training=False,
                       global_pool=global_pool,
                       scope='resnet9')

        layers.batch_norm = batch_norm_fn
Example #34
  def _testDeprecatingIsTraining(self, network_fn):
    batch_norm_fn = layers.batch_norm

    @add_arg_scope
    def batch_norm_expect_is_training(*args, **kwargs):
      assert kwargs['is_training']
      return batch_norm_fn(*args, **kwargs)

    @add_arg_scope
    def batch_norm_expect_is_not_training(*args, **kwargs):
      assert not kwargs['is_training']
      return batch_norm_fn(*args, **kwargs)

    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)

    # Default argument for resnet_arg_scope
    layers.batch_norm = batch_norm_expect_is_training
    with arg_scope(resnet_utils.resnet_arg_scope()):
      network_fn(inputs, num_classes, global_pool=global_pool, scope='resnet1')

    layers.batch_norm = batch_norm_expect_is_training
    with arg_scope(resnet_utils.resnet_arg_scope()):
      network_fn(
          inputs,
          num_classes,
          is_training=True,
          global_pool=global_pool,
          scope='resnet2')

    layers.batch_norm = batch_norm_expect_is_not_training
    with arg_scope(resnet_utils.resnet_arg_scope()):
      network_fn(
          inputs,
          num_classes,
          is_training=False,
          global_pool=global_pool,
          scope='resnet3')

    # resnet_arg_scope with is_training set to True (deprecated)
    layers.batch_norm = batch_norm_expect_is_training
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=True)):
      network_fn(inputs, num_classes, global_pool=global_pool, scope='resnet4')

    layers.batch_norm = batch_norm_expect_is_training
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=True)):
      network_fn(
          inputs,
          num_classes,
          is_training=True,
          global_pool=global_pool,
          scope='resnet5')

    layers.batch_norm = batch_norm_expect_is_not_training
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=True)):
      network_fn(
          inputs,
          num_classes,
          is_training=False,
          global_pool=global_pool,
          scope='resnet6')

    # resnet_arg_scope with is_training set to False (deprecated)
    layers.batch_norm = batch_norm_expect_is_not_training
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      network_fn(inputs, num_classes, global_pool=global_pool, scope='resnet7')

    layers.batch_norm = batch_norm_expect_is_training
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      network_fn(
          inputs,
          num_classes,
          is_training=True,
          global_pool=global_pool,
          scope='resnet8')

    layers.batch_norm = batch_norm_expect_is_not_training
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      network_fn(
          inputs,
          num_classes,
          is_training=False,
          global_pool=global_pool,
          scope='resnet9')

    layers.batch_norm = batch_norm_fn
Example #35
x = tf.placeholder(tf.float32, shape=[batch_size, height, width, 6])
y_ = tf.placeholder(tf.float32, shape=[batch_size, height, width, 1])

network = tl.layers.InputLayer(x, name='input')

network = tl.layers.Conv2d(network, 16, (3, 3), (1, 1), act=tf.nn.relu, padding='VALID', name='conv1')
network = tl.layers.Conv2d(network, 16, (3, 3), (1, 1), act=tf.nn.relu, padding='VALID', name='conv2')
network = tl.layers.Conv2d(network, 32, (3, 3), (1, 1), act=tf.nn.relu, padding='VALID', name='conv3')
network = tl.layers.MaxPool2d(network, (2, 2), (2, 2), padding='VALID', name='pool1')

network = tl.layers.Conv2d(network, 32, (3, 3), (1, 1), act=tf.nn.relu, padding='VALID', name='conv4')
network = tl.layers.Conv2d(network, 32, (3, 3), (1, 1), act=tf.nn.relu, padding='VALID', name='conv5')
network = tl.layers.Conv2dLayer(network, shape = [1, 1, 32, 3], act=tf.nn.relu, name='conv6', padding='VALID')
network = tl.layers.MaxPool2d(network, (2, 2), (2, 2), padding='VALID', name='pool2')

with slim.arg_scope(resnet_arg_scope()):
    network = tl.layers.SlimNetsLayer(layer=network,
                                      slim_layer=resnet_v2_50,
                                      slim_args={'num_classes': None,
                                                 'is_training': True,
                                                 'global_pool': False,
                                                 'output_stride': 16,
                                                 # 'inputs': [batch_size, 299, 299, 3],
                                                 },
                                      # the name must match the ckpt model
                                      name='resnet_v2_50')
# network = tl.layers.MaxPool2d(network, (1, 1), (1, 1), padding='SAME', name='pool3')
network.print_layers()