Example 1
    def add_deconv(self, bilinear=False):
        fusion = self.get_output('fusion')

        with tf.variable_scope('deconv') as scope:
            # Learn from scratch
            if not bilinear:
                w_deconv = tf.get_variable(
                    'weights', [16, 16, self.num_classes, self.num_classes],
                    initializer=tf.truncated_normal_initializer(0.0,
                                                                stddev=0.01))
            # Initialize with a bilinear upsampling filter (kept trainable here)
            else:
                w_deconv = tf.get_variable(
                    'weights',
                    trainable=True,
                    initializer=bilinear_upsample_weights(
                        16, self.num_classes))

            b_deconv = tf.get_variable('biases', [self.num_classes],
                                       initializer=tf.constant_initializer(0))
            z_deconv = tf.nn.conv2d_transpose(
                fusion, w_deconv,
                output_shape=[self.batch_num, self.max_size[0],
                              self.max_size[1], self.num_classes],
                strides=[1, 8, 8, 1],
                padding='SAME',
                name='z') + b_deconv

        # Add to store dicts
        self.outputs['deconv'] = z_deconv
        self.layers['deconv'] = {'weights': w_deconv, 'biases': b_deconv}
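
All four examples call a `bilinear_upsample_weights` helper that is not shown. A common implementation, used in several FCN reimplementations, builds one 2D bilinear kernel per class on the diagonal of the in/out channel axes; the sketch below assumes that convention (note that the snippets are not consistent about whether the first argument is the upsampling factor or the kernel size, so check the repo's own helper):

import numpy as np

def bilinear_upsample_weights(factor, num_classes):
    """Kernel for tf.nn.conv2d_transpose that performs bilinear upsampling.

    Returns an array of shape
    [filter_size, filter_size, num_classes, num_classes] in which each
    class channel maps onto itself through a 2D bilinear kernel.
    """
    filter_size = 2 * factor - factor % 2
    center = factor - 1 if filter_size % 2 == 1 else factor - 0.5
    og = np.ogrid[:filter_size, :filter_size]
    kernel = ((1 - abs(og[0] - center) / factor) *
              (1 - abs(og[1] - center) / factor))
    weights = np.zeros((filter_size, filter_size, num_classes, num_classes),
                       dtype=np.float32)
    for i in range(num_classes):
        weights[:, :, i, i] = kernel  # no cross-class mixing
    return weights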
Example 2
    def add_shortcut(self, bilinear=True):
        conv8 = self.get_output('conv8')
        pool4 = self.get_output('pool4')

        target_size = int(pool4.get_shape()[1])

        with tf.variable_scope('2x_conv8') as scope:
            # Learn from scratch
            if not bilinear:
                w_deconv = tf.get_variable(
                    'weights', [4, 4, self.num_classes, self.num_classes],
                    initializer=tf.truncated_normal_initializer(0.0,
                                                                stddev=0.01))
            # Initialize with a bilinear upsampling filter (kept trainable here)
            else:
                w_deconv = tf.get_variable(
                    'weights',
                    trainable=True,
                    initializer=bilinear_upsample_weights(2, self.num_classes))

            b_deconv = tf.get_variable('biases', [self.num_classes],
                                       initializer=tf.constant_initializer(0))
            z_deconv = tf.nn.conv2d_transpose(
                conv8,
                w_deconv,
                [self.batch_num, target_size, target_size, self.num_classes],
                strides=[1, 2, 2, 1],
                padding='SAME',
                name='z') + b_deconv

        with tf.variable_scope('pool4_1x1') as scope:
            w_pool4 = tf.get_variable(
                'weights', [1, 1, 512, self.num_classes],
                initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            b_pool4 = tf.get_variable('biases', [self.num_classes],
                                      initializer=tf.constant_initializer(0))
            z_pool4 = tf.nn.conv2d(
                pool4, w_pool4, strides=[1, 1, 1, 1], padding='SAME') + b_pool4

        # Element-wise sum
        fusion = z_deconv + z_pool4

        # Add to store dicts
        self.outputs['2x_conv8'] = z_deconv
        self.outputs['pool4_1x1'] = z_pool4
        self.outputs['fusion'] = fusion
        self.layers['2x_conv8'] = {'weights': w_deconv, 'biases': b_deconv}
        self.layers['pool4_1x1'] = {'weights': w_pool4, 'biases': b_pool4}
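
This is the classic FCN skip connection: `conv8` (assumed to hold class scores at pool5 resolution, 1/32 of the input) is upsampled 2x so it can be summed with the 1x1-convolved `pool4` features at 1/16 resolution. A minimal shape check, with a hypothetical 512x512 input, batch size 1, and 21 classes (all sizes here are illustrative):

import tensorflow as tf  # TF 1.x, as in the snippets above

batch, num_classes = 1, 21
conv8 = tf.zeros([batch, 16, 16, num_classes])  # 512 / 32 = 16 (pool5 scale)
pool4 = tf.zeros([batch, 32, 32, 512])          # 512 / 16 = 32 (pool4 scale)

w = tf.zeros([4, 4, num_classes, num_classes])  # stand-in 2x deconv kernel
up = tf.nn.conv2d_transpose(conv8, w,
                            output_shape=[batch, 32, 32, num_classes],
                            strides=[1, 2, 2, 1], padding='SAME')
print(up.get_shape())  # (1, 32, 32, 21): now spatially aligned with pool4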
Example 3
    def add_deconv(self, bilinear=False):
        fusion = self.get_output('fusion')

        with tf.variable_scope('deconv') as scope:
            # Learn from scratch
            if not bilinear:
                w_deconv = tf.get_variable(
                    'weights', [32, 32, self.num_classes, self.num_classes],
                    initializer=tf.truncated_normal_initializer(0.0,
                                                                stddev=0.01))
            # Initialize with a bilinear upsampling filter (kept trainable here)
            else:
                w_deconv = tf.get_variable(
                    'weights',
                    trainable=True,
                    initializer=bilinear_upsample_weights(
                        16, self.num_classes))

            b_deconv = tf.get_variable('biases', [self.num_classes],
                                       initializer=tf.constant_initializer(0))
            z_deconv = tf.nn.conv2d_transpose(
                fusion, w_deconv,
                output_shape=[self.batch_num, self.max_size[0],
                              self.max_size[1], self.num_classes],
                strides=[1, 16, 16, 1],
                padding='SAME',
                name='z') + b_deconv
            # Optional CRF-RNN refinement of the raw deconv scores; to skip
            # it, store z_deconv below instead of crf_deconv.
            crf_deconv = CrfRnnLayer(
                image_dims=(self.max_size[0], self.max_size[1]),
                num_classes=self.num_classes,
                theta_alpha=160.,
                theta_beta=3.,
                theta_gamma=3.,
                num_iterations=10,
                name='crfrnn')([z_deconv, self.img])  # shape: (1, 640, 640, 21)

        # Add to store dicts
        self.outputs['deconv'] = crf_deconv  # shape: (5, 640, 640, 21), float32
        # self.outputs['deconv'] = z_deconv  # uncomment to bypass the CRF-RNN
        self.layers['deconv'] = {'weights': w_deconv, 'biases': b_deconv}
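
The `CrfRnnLayer` call matches the interface of the CRF-as-RNN layer from the crfasrnn_keras project (https://github.com/sadeepj/crfasrnn_keras), which runs differentiable mean-field CRF inference over the unary scores and the original image. Assuming that implementation (it requires building its custom `high_dim_filter` op, and early versions supported batch size 1 only), the theta parameters are the kernel bandwidths of the pairwise potentials; a hedged usage sketch:

import tensorflow as tf  # TF 1.x
from crfrnn_layer import CrfRnnLayer  # from sadeepj/crfasrnn_keras

unaries = tf.placeholder(tf.float32, [1, 640, 640, 21])   # class scores, e.g. z_deconv
rgb_image = tf.placeholder(tf.float32, [1, 640, 640, 3])  # original image, e.g. self.img

refined = CrfRnnLayer(
    image_dims=(640, 640),   # spatial size of the unaries and the image
    num_classes=21,
    theta_alpha=160.,        # bilateral (appearance) kernel: spatial bandwidth
    theta_beta=3.,           # bilateral kernel: color bandwidth
    theta_gamma=3.,          # spatial (smoothness) kernel bandwidth
    num_iterations=10,       # mean-field iterations
    name='crfrnn')([unaries, rgb_image])
# refined: [1, 640, 640, 21] refined per-pixel class scores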
Example 4
    def add_deconv(self):
        conv5_3 = self.get_output('conv5_3')
        conv4_3 = self.get_output('conv4_3')
        conv3_3 = self.get_output('conv3_3')
        conv2_2 = self.get_output('conv2_2')
        conv1_2 = self.get_output('conv1_2')

        shape1 = tf.shape(conv1_2)[1]
        shape2 = tf.shape(conv1_2)[2]

        # ======== Preprocessing: align feature dimensions ======== #
        with tf.variable_scope('align5') as scope:
            w_conv5 = tf.get_variable(
                'weights', [1, 1, 512, 64],
                initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            b_conv5 = tf.get_variable('biases', [64],
                                      initializer=tf.constant_initializer(0))
            z_conv5 = tf.nn.conv2d(conv5_3, w_conv5, strides=[1, 1, 1, 1],
                                   padding='SAME') + b_conv5

        with tf.variable_scope('align4') as scope:
            w_conv4 = tf.get_variable(
                'weights', [1, 1, 512, 64],
                initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            b_conv4 = tf.get_variable('biases', [64],
                                      initializer=tf.constant_initializer(0))
            z_conv4 = tf.nn.conv2d(conv4_3, w_conv4, strides=[1, 1, 1, 1],
                                   padding='SAME') + b_conv4

        with tf.variable_scope('align3') as scope:
            w_conv3 = tf.get_variable(
                'weights', [1, 1, 256, 64],
                initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            b_conv3 = tf.get_variable('biases', [64],
                                      initializer=tf.constant_initializer(0))
            z_conv3 = tf.nn.conv2d(conv3_3, w_conv3, strides=[1, 1, 1, 1],
                                   padding='SAME') + b_conv3

        with tf.variable_scope('align2') as scope:
            w_conv2 = tf.get_variable(
                'weights', [1, 1, 128, 64],
                initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            b_conv2 = tf.get_variable('biases', [64],
                                      initializer=tf.constant_initializer(0))
            z_conv2 = tf.nn.conv2d(conv2_2, w_conv2, strides=[1, 1, 1, 1],
                                   padding='SAME') + b_conv2

        # ======== Upsample and skip connection ======== #
        with tf.variable_scope('deconv5') as scope:
            # Use a fixed (non-trainable) bilinear upsampling filter
            w_deconv5 = tf.get_variable('weights',
                                        trainable=False,
                                        initializer=bilinear_upsample_weights(
                                            32, 64))

            z_deconv5 = tf.nn.conv2d_transpose(
                z_conv5,
                w_deconv5, [self.batch_num, shape1, shape2, 64],
                strides=[1, 16, 16, 1],
                padding='SAME',
                name='z')

        with tf.variable_scope('deconv4') as scope:
            # Use a fixed (non-trainable) bilinear upsampling filter
            w_deconv4 = tf.get_variable('weights',
                                        trainable=False,
                                        initializer=bilinear_upsample_weights(
                                            16, 64))

            z_deconv4 = tf.nn.conv2d_transpose(
                z_conv4,
                w_deconv4, [self.batch_num, shape1, shape2, 64],
                strides=[1, 8, 8, 1],
                padding='SAME',
                name='z')

        with tf.variable_scope('deconv3') as scope:
            # Use a fixed (non-trainable) bilinear upsampling filter
            w_deconv3 = tf.get_variable('weights',
                                        trainable=False,
                                        initializer=bilinear_upsample_weights(
                                            8, 64))

            z_deconv3 = tf.nn.conv2d_transpose(
                z_conv3,
                w_deconv3, [self.batch_num, shape1, shape2, 64],
                strides=[1, 4, 4, 1],
                padding='SAME',
                name='z')

        with tf.variable_scope('deconv2') as scope:
            # Use a fixed (non-trainable) bilinear upsampling filter
            w_deconv2 = tf.get_variable('weights',
                                        trainable=False,
                                        initializer=bilinear_upsample_weights(
                                            4, 64))

            z_deconv2 = tf.nn.conv2d_transpose(
                z_conv2,
                w_deconv2, [self.batch_num, shape1, shape2, 64],
                strides=[1, 2, 2, 1],
                padding='SAME',
                name='z')

        # Pre-TF-1.0 API used tf.concat(dim, values); see the note after this example
        # merge = tf.concat(3, [conv1_2, z_deconv2, z_deconv3, z_deconv4, z_deconv5])
        merge = conv1_2 + z_deconv2 + z_deconv3 + z_deconv4 + z_deconv5

        # Add to store dicts
        self.outputs['align5'] = z_conv5
        self.outputs['align4'] = z_conv4
        self.outputs['align3'] = z_conv3
        self.outputs['align2'] = z_conv2
        self.outputs['deconv5'] = z_deconv5
        self.outputs['deconv4'] = z_deconv4
        self.outputs['deconv3'] = z_deconv3
        self.outputs['deconv2'] = z_deconv2
        self.outputs['merge'] = merge

        self.layers['align5'] = {'weights': w_conv5, 'biases': b_conv5}
        self.layers['align4'] = {'weights': w_conv4, 'biases': b_conv4}
        self.layers['align3'] = {'weights': w_conv3, 'biases': b_conv3}
        self.layers['align2'] = {'weights': w_conv2, 'biases': b_conv2}
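
The commented-out line above uses the pre-1.0 `tf.concat(dim, values)` argument order; TensorFlow 1.x swapped it to `tf.concat(values, axis)`. A self-contained illustration of the concat variant with stand-in tensors (shapes are hypothetical):

import tensorflow as tf  # TF 1.x

# Five stand-in feature maps at conv1_2 resolution (illustrative sizes).
feats = [tf.zeros([1, 224, 224, 64]) for _ in range(5)]

# TF 1.x signature: values first, axis second.
merge = tf.concat(feats, axis=3)  # channel-wise: [1, 224, 224, 5 * 64]
print(merge.get_shape())          # (1, 224, 224, 320)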