    def _upsampling_block(self, bottom, skip_connection, input_channels, output_channels, skip_input_channels, name='upsample', reuse=False):
        with tf.variable_scope(name, reuse=reuse):
            # upsample the incoming feature map to the resolution of the skip connection
            self._add_to_layers(name + '/deconv', sharedLayers.conv2d_transpose(
                bottom, [4, 4, output_channels, input_channels], strides=2, name='deconv'))
            # predict a disparity map at the current (coarse) resolution
            self._add_to_layers(name + '/predict', sharedLayers.conv2d(
                bottom, [3, 3, input_channels, 1], strides=1, activation=lambda x: x, name='predict'))
            self._disparities.append(self._make_disp(self._layers[name + '/predict']))
            # upsample the coarse disparity so it can be fed to the next decoder stage
            self._add_to_layers(name + '/up_predict', sharedLayers.conv2d_transpose(
                self._get_layer_as_input(name + '/predict'), [4, 4, 1, 1], strides=2, activation=lambda x: x, name='up_predict'))
            with tf.variable_scope('join_skip'):
                # fuse encoder skip connection, upsampled features and upsampled disparity
                concat_inputs = tf.concat(
                    [skip_connection,
                     self._get_layer_as_input(name + '/deconv'),
                     self._get_layer_as_input(name + '/up_predict')], axis=3)
            self._add_to_layers(name + '/concat', sharedLayers.conv2d(
                concat_inputs, [3, 3, output_channels + skip_input_channels + 1, output_channels],
                strides=1, activation=lambda x: x, name='concat'))
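
    # The sharedLayers module itself is not shown here. The sketch below is an
    # assumption about what a conv2d wrapper with the signature used above
    # (input, [kh, kw, in_channels, out_channels], strides=..., activation=..., name=...)
    # typically does, included only for illustration; it is not the repository's code.
    def _conv2d_sketch(self, bottom, kernel_shape, strides=1,
                       activation=tf.nn.relu, name='conv'):
        with tf.variable_scope(name):
            # one weight tensor per layer, shaped [kh, kw, in_channels, out_channels]
            weights = tf.get_variable(
                'weights', kernel_shape,
                initializer=tf.glorot_uniform_initializer())
            biases = tf.get_variable(
                'biases', [kernel_shape[-1]],
                initializer=tf.zeros_initializer())
            conv = tf.nn.conv2d(
                bottom, weights, strides=[1, strides, strides, 1], padding='SAME')
            # activation defaults to ReLU; callers above pass lambda x: x for linear outputs
            return activation(tf.nn.bias_add(conv, biases))
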
    def _stereo_estimator(self, costs, upsampled_disp=None, scope='fgc-volume-filtering'):
        activation = self._leaky_relu()
        with tf.variable_scope(scope):
            # create initial cost volume
            if upsampled_disp is not None:
                volume = tf.concat([costs, upsampled_disp], -1)
            else:
                volume = costs

            names = []

            # disp-1
            names.append('{}/disp1'.format(scope))
            input_layer = volume
            self._add_to_layers(names[-1], sharedLayers.conv2d(input_layer, [3, 3, volume.get_shape(
            ).as_list()[3], 128], name='disp-1', bName='biases', activation=activation))

            # disp-2:
            names.append('{}/disp2'.format(scope))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(names[-1], sharedLayers.conv2d(input_layer, [
                                3, 3, 128, 128], name='disp-2', bName='biases', activation=activation))

            # disp-3
            names.append('{}/disp3'.format(scope))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(names[-1], sharedLayers.conv2d(input_layer, [
                                3, 3, 128, 96], name='disp-3', bName='biases', activation=activation))

            # disp-4
            names.append('{}/disp4'.format(scope))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(names[-1], sharedLayers.conv2d(input_layer, [
                                3, 3, 96, 64], name='disp-4', bName='biases', activation=activation))

            # disp-5
            names.append('{}/disp5'.format(scope))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(names[-1], sharedLayers.conv2d(input_layer, [
                                3, 3, 64, 32], name='disp-5', bName='biases', activation=activation))

            # disp-6
            names.append('{}/disp6'.format(scope))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(names[-1], sharedLayers.conv2d(input_layer, [
                                3, 3, 32, 1], name='disp-6', bName='biases', activation=lambda x: x))

            # in_channels = self._get_layer_as_input(names[-1]).get_shape()[-1]
            # return sharedLayers.EA(self._get_layer_as_input(names[-1]), self._get_layer_as_input(names[-1]), in_channels, 50, 5, 50)
            return self._get_layer_as_input(names[-1])
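
    # Plausible minimal sketches of the bookkeeping helpers used throughout this
    # class, consistent with how they are called above (self._layers is indexed as
    # a plain dict elsewhere in this file). The actual implementations are not
    # shown here, so treat these as assumptions rather than the repository's code.
    def _add_to_layers(self, name, op):
        # register a layer's output tensor under its name
        self._layers[name] = op

    def _get_layer_as_input(self, name):
        # retrieve a previously registered tensor to feed into the next layer
        if name not in self._layers:
            raise KeyError('Unknown layer requested as input: {}'.format(name))
        return self._layers[name]

    def _leaky_relu(self):
        # returns the activation callable used by the filtering/pyramid blocks;
        # the slope 0.2 is an assumption, not a value taken from the repository
        return lambda x: tf.nn.leaky_relu(x, alpha=0.2)
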
    def _build_network(self, args):
        if args['correlation']:
            self._add_to_layers('conv1a', sharedLayers.conv2d(
                self._left_input_batch, [7, 7, 3, 64], strides=2, name='conv1'))
            self._add_to_layers('conv1b', sharedLayers.conv2d(self._right_input_batch, [
                                7, 7, 3, 64], strides=2, name='conv1', reuse=True))

            self._add_to_layers('conv2a', sharedLayers.conv2d(
                self._get_layer_as_input('conv1a'), [5, 5, 64, 128], strides=2, name='conv2'))
            self._add_to_layers('conv2b', sharedLayers.conv2d(self._get_layer_as_input(
                'conv1b'), [5, 5, 64, 128], strides=2, name='conv2', reuse=True))

            self._add_to_layers('conv_redir', sharedLayers.conv2d(self._get_layer_as_input(
                'conv2a'), [1, 1, 128, 64], strides=1, name='conv_redir'))
            self._add_to_layers('corr', sharedLayers.correlation(self._get_layer_as_input(
                'conv2a'), self._get_layer_as_input('conv2b'), max_disp=MAX_DISP))

            self._add_to_layers('conv3', sharedLayers.conv2d(tf.concat([self._get_layer_as_input('corr'), self._get_layer_as_input(
                'conv_redir')], axis=3), [5, 5, MAX_DISP * 2 + 1 + 64, 256], strides=2, name='conv3'))
        else:
            concat_inputs = tf.concat(
                [self._left_input_batch, self._right_input_batch], axis=-1)
            self._add_to_layers('conv1', sharedLayers.conv2d(
                concat_inputs, [7, 7, 6, 64], strides=2, name='conv1'))
            self._add_to_layers('conv2', sharedLayers.conv2d(
                self._get_layer_as_input('conv1'), [5, 5, 64, 128], strides=2, name='conv2'))
            self._add_to_layers('conv3', sharedLayers.conv2d(
                self._get_layer_as_input('conv2'), [5, 5, 128, 256], strides=2, name='conv3'))

        self._add_to_layers('conv3/1', sharedLayers.conv2d(
            self._get_layer_as_input('conv3'), [3, 3, 256, 256], strides=1, name='conv3/1'))
        self._add_to_layers('conv4', sharedLayers.conv2d(self._get_layer_as_input(
            'conv3/1'), [3, 3, 256, 512], strides=2, name='conv4'))
        self._add_to_layers('conv4/1', sharedLayers.conv2d(
            self._get_layer_as_input('conv4'), [3, 3, 512, 512], strides=1, name='conv4/1'))
        self._add_to_layers('conv5', sharedLayers.conv2d(self._get_layer_as_input(
            'conv4/1'), [3, 3, 512, 512], strides=2, name='conv5'))
        self._add_to_layers('conv5/1', sharedLayers.conv2d(
            self._get_layer_as_input('conv5'), [3, 3, 512, 512], strides=1, name='conv5/1'))
        self._add_to_layers('conv6', sharedLayers.conv2d(self._get_layer_as_input(
            'conv5/1'), [3, 3, 512, 1024], strides=2, name='conv6'))
        self._add_to_layers('conv6/1', sharedLayers.conv2d(self._get_layer_as_input(
            'conv6'), [3, 3, 1024, 1024], strides=1, name='conv6/1'))

        self._upsampling_block(self._get_layer_as_input(
            'conv6/1'), self._get_layer_as_input('conv5/1'), 1024, 512, 512, name='up5')

        self._upsampling_block(self._get_layer_as_input(
            'up5/concat'), self._get_layer_as_input('conv4/1'), 512, 256, 512, name='up4')

        self._upsampling_block(self._get_layer_as_input(
            'up4/concat'), self._get_layer_as_input('conv3/1'), 256, 128, 256, name='up3')

        # the skip connections come from the siamese towers when correlation is
        # enabled ('conv2a'/'conv1a'), otherwise from the single-tower encoder
        skip_2 = 'conv2a' if args['correlation'] else 'conv2'
        self._upsampling_block(self._get_layer_as_input(
            'up3/concat'), self._get_layer_as_input(skip_2), 128, 64, 128, name='up2')

        skip_1 = 'conv1a' if args['correlation'] else 'conv1'
        self._upsampling_block(self._get_layer_as_input(
            'up2/concat'), self._get_layer_as_input(skip_1), 64, 32, 64, name='up1')

        self._add_to_layers('prediction', sharedLayers.conv2d(self._get_layer_as_input(
            'up1/concat'), [3, 3, 32, 1], strides=1, activation=lambda x: x, name='prediction'))
        self._disparities.append(self._make_disp(self._layers['prediction'], 2))

        # NOTE: if DispNet gives wrong results, try the alternative rescaling below
        # rescaled_prediction = -preprocessing.rescale_image(self._layers['prediction'], tf.shape(self._left_input_batch)[1:3])
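        # The final prediction is produced at half of the network input resolution,
        # so the disparity values are doubled after resizing back to full resolution
        # to keep them in full-size pixel units (inferred from the * 2 factor below,
        # not stated in the original code).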
        rescaled_prediction = tf.image.resize_images(self._layers['prediction'], tf.shape(self._left_input_batch)[1:3]) * 2
        
        self._layers['rescaled_prediction'] = tf.image.resize_image_with_crop_or_pad(rescaled_prediction, self._restore_shape[0], self._restore_shape[1])
        self._disparities.append(self._layers['rescaled_prediction'])
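
    # A minimal, self-contained sketch of a horizontal (1D) correlation layer that
    # yields 2 * max_disp + 1 channels, the channel count assumed above for
    # sharedLayers.correlation ([5, 5, MAX_DISP * 2 + 1 + 64, 256] in conv3).
    # This illustrates the technique only; it is not the repository's implementation.
    def _correlation_sketch(self, left, right, max_disp):
        # pad the right feature map along the width so every displacement stays in bounds
        padded_right = tf.pad(right, [[0, 0], [0, 0], [max_disp, max_disp], [0, 0]])
        width = tf.shape(left)[2]
        corr_slices = []
        for shift in range(2 * max_disp + 1):
            # right features displaced horizontally by (shift - max_disp) pixels
            shifted_right = tf.slice(
                padded_right, begin=[0, 0, shift, 0],
                size=tf.stack([-1, -1, width, -1]))
            # per-pixel similarity: mean over feature channels of the elementwise product
            corr_slices.append(tf.reduce_mean(left * shifted_right, axis=3))
        # stack one similarity map per displacement -> [batch, height, width, 2*max_disp+1]
        return tf.stack(corr_slices, axis=3)
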
    def _pyramid_features(self,
                          input_batch,
                          scope='pyramid',
                          reuse=False,
                          layer_prefix='pyramid'):
        with tf.variable_scope(scope, reuse=reuse):

            names = []
            activation = self._leaky_relu()

            # conv1
            names.append('{}/conv1'.format(layer_prefix))
            input_layer = input_batch
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(
                    input_layer,
                    [3, 3, input_batch.get_shape()[-1].value, 16],
                    strides=2,
                    name='conv1',
                    bName='biases',
                    activation=activation))

            # conv2
            names.append('{}/conv2'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 16, 16],
                                    strides=1,
                                    name='conv2',
                                    bName='biases',
                                    activation=activation))

            # conv3
            names.append('{}/conv3'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 16, 32],
                                    strides=2,
                                    name='conv3',
                                    bName='biases',
                                    activation=activation))

            # conv4
            names.append('{}/conv4'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 32, 32],
                                    strides=1,
                                    name='conv4',
                                    bName='biases',
                                    activation=activation))

            # conv5
            names.append('{}/conv5'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 32, 64],
                                    strides=2,
                                    name='conv5',
                                    bName='biases',
                                    activation=activation))

            # conv6
            names.append('{}/conv6'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 64, 64],
                                    strides=1,
                                    name='conv6',
                                    bName='biases',
                                    activation=activation))

            # conv7
            names.append('{}/conv7'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 64, 96],
                                    strides=2,
                                    name='conv7',
                                    bName='biases',
                                    activation=activation))

            # conv8
            names.append('{}/conv8'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 96, 96],
                                    strides=1,
                                    name='conv8',
                                    bName='biases',
                                    activation=activation))

            # conv9
            names.append('{}/conv9'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 96, 128],
                                    strides=2,
                                    name='conv9',
                                    bName='biases',
                                    activation=activation))

            # conv10
            names.append('{}/conv10'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 128, 128],
                                    strides=1,
                                    name='conv10',
                                    bName='biases',
                                    activation=activation))

            # conv11
            names.append('{}/conv11'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 128, 192],
                                    strides=2,
                                    name='conv11',
                                    bName='biases',
                                    activation=activation))

            # conv12
            names.append('{}/conv12'.format(layer_prefix))
            input_layer = self._get_layer_as_input(names[-2])
            self._add_to_layers(
                names[-1],
                sharedLayers.conv2d(input_layer, [3, 3, 192, 192],
                                    strides=1,
                                    name='conv12',
                                    bName='biases',
                                    activation=activation))
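
    # Illustrative usage sketch (not taken from the repository): the pyramid
    # extractor is typically run once per view, with reuse=True on the second call
    # so that left and right features share the same convolution weights. The
    # method name and layer prefixes below are placeholders chosen for this example.
    def _extract_shared_pyramids_sketch(self):
        self._pyramid_features(
            self._left_input_batch, layer_prefix='left_pyramid')
        self._pyramid_features(
            self._right_input_batch, reuse=True, layer_prefix='right_pyramid')
        # the features at each scale can then be fetched by name, e.g.
        # self._get_layer_as_input('left_pyramid/conv12') for the coarsest level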