Example #1
def inference_refine(images,
                     coarse7_output,
                     keep_conv,
                     reuse=False,
                     trainable=True):
    fine1_conv = conv2d('fine1',
                        images, [9, 9, 3, 63], [63], [1, 2, 2, 1],
                        padding='VALID',
                        reuse=reuse,
                        trainable=trainable)
    fine1 = tf.nn.max_pool(fine1_conv,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='fine_pool1')
    fine1_dropout = tf.nn.dropout(fine1, keep_conv)
    # concatenate along channels: 63 conv features + 1 coarse depth channel = 64
    fine2 = tf.concat([fine1_dropout, coarse7_output], 3)
    fine3 = conv2d('fine3',
                   fine2, [5, 5, 64, 64], [64], [1, 1, 1, 1],
                   padding='SAME',
                   reuse=reuse,
                   trainable=trainable)
    fine3_dropout = tf.nn.dropout(fine3, keep_conv)
    fine4 = conv2d('fine4',
                   fine3_dropout, [5, 5, 64, 1], [1], [1, 1, 1, 1],
                   padding='SAME',
                   reuse=reuse,
                   trainable=trainable)
    return fine4
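
All of the examples in this section call a project-specific conv2d wrapper rather than tf.nn.conv2d directly. Its implementation is not shown here; the sketch below is only a plausible reconstruction based on the argument comment in Examples #2 and #6 (scope_name, inputs, shape, bias_shape, stride, padding, wd, reuse, trainable, with_w). The variable names, initializers, and weight-decay handling are assumptions, and whether an activation is applied inside the wrapper varies: Examples #2 and #6 apply lrelu externally, so this sketch returns the linear (pre-activation) output.

import tensorflow as tf

def conv2d(scope_name, inputs, shape, bias_shape, stride,
           padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False):
    # Sketch only; the real project helper may differ (e.g. the depth-map
    # examples likely fold a ReLU into their own version).
    with tf.variable_scope(scope_name, reuse=reuse):
        kernel = tf.get_variable('weights', shape,
                                 initializer=tf.truncated_normal_initializer(stddev=0.01),
                                 trainable=trainable)
        biases = tf.get_variable('biases', bias_shape,
                                 initializer=tf.constant_initializer(0.0),
                                 trainable=trainable)
        conv = tf.nn.conv2d(inputs, kernel, stride, padding=padding)
        out = tf.nn.bias_add(conv, biases)
        if wd > 0.0:
            # optional L2 weight decay, collected for the training loss
            tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(kernel), wd))
        if with_w:
            return out, kernel, biases
        return out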
Example #2
    def inference(self, x, reuse=False):
        print(x.get_shape())
        # conv2d arguments = (scope_name, inputs, shape, bias_shape, stride, padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False)
        conv_h0, conv_h0_w, conv_h0_b = mp.conv2d(
            'd_conv_h0',
            x, [5, 5, x.get_shape()[-1], self.first_conv_dim],
            [self.first_conv_dim], [1, 2, 2, 1],
            padding='SAME',
            reuse=reuse,
            with_w=True)
        h0 = mp.lrelu(conv_h0)

        # conv2d arguments = (scope_name, inputs, shape, bias_shape, stride, padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False)
        conv_h1, conv_h1_w, conv_h1_b = mp.conv2d(
            'd_conv_h1',
            h0, [5, 5, h0.get_shape()[-1], self.first_conv_dim],
            [self.first_conv_dim], [1, 2, 2, 1],
            padding='SAME',
            reuse=reuse,
            with_w=True)
        h1 = mp.lrelu(conv_h1)

        # linear projection (the hard-coded 7 * 7 * 64 assumes 28x28 inputs and
        # first_conv_dim == 64, i.e. h1 is [batch, 7, 7, 64] after two stride-2 convs)
        h2 = mp.linear_project('d_lin_project_h1',
                               tf.reshape(h1, [self.batch_size, 7 * 7 * 64]),
                               1,
                               reuse=reuse)
        return tf.nn.sigmoid(h2), h2
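
Example #2 also relies on mp.lrelu and mp.linear_project, which are not shown. Below is a hedged sketch of what they plausibly look like, inferred only from the call sites; the leak factor, initializers, and variable names are assumptions.

import tensorflow as tf

def lrelu(x, leak=0.2):
    # leaky ReLU: keep positive values, scale negatives by `leak`
    return tf.maximum(x, leak * x)

def linear_project(scope_name, x, output_dim, reuse=False):
    # affine projection of a flat [batch, features] tensor to [batch, output_dim]
    with tf.variable_scope(scope_name, reuse=reuse):
        w = tf.get_variable('weights', [int(x.get_shape()[-1]), output_dim],
                            initializer=tf.random_normal_initializer(stddev=0.02))
        b = tf.get_variable('biases', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b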
Example #3
def globalDepthMap(images, reuse=False, trainable=True):
    with tf.name_scope("Global_Depth"):
        coarse1_conv = conv2d('coarse1',
                              images, [11, 11, 3, 96], [96], [1, 4, 4, 1],
                              padding='VALID',
                              reuse=reuse,
                              trainable=trainable)
        coarse1 = tf.nn.max_pool(coarse1_conv,
                                 ksize=[1, 3, 3, 1],
                                 strides=[1, 2, 2, 1],
                                 padding='VALID',
                                 name='pool1')
        coarse2_conv = conv2d('coarse2',
                              coarse1, [5, 5, 96, 256], [256], [1, 1, 1, 1],
                              padding='VALID',
                              reuse=reuse,
                              trainable=trainable)
        coarse2 = tf.nn.max_pool(coarse2_conv,
                                 ksize=[1, 3, 3, 1],
                                 strides=[1, 2, 2, 1],
                                 padding='SAME',
                                 name='pool2')
        coarse3 = conv2d('coarse3',
                         coarse2, [3, 3, 256, 384], [384], [1, 1, 1, 1],
                         padding='VALID',
                         reuse=reuse,
                         trainable=trainable)
        coarse4 = conv2d('coarse4',
                         coarse3, [3, 3, 384, 384], [384], [1, 1, 1, 1],
                         padding='VALID',
                         reuse=reuse,
                         trainable=trainable)
        coarse5 = conv2d('coarse5',
                         coarse4, [3, 3, 384, 256], [256], [1, 1, 1, 1],
                         padding='VALID',
                         reuse=reuse,
                         trainable=trainable)
        coarse6 = fullyConnectedLayer('coarse6',
                                      coarse5, [6 * 10 * 256, 4096], [4096],
                                      reuse=reuse,
                                      trainable=trainable)
        coarse6_dropout = tf.nn.dropout(coarse6, 0.8)
        coarse7 = fullyConnectedLayer('coarse7',
                                      coarse6_dropout, [4096, 4070], [4070],
                                      reuse=reuse,
                                      trainable=trainable)
        coarse7_output = tf.reshape(coarse7, [-1, 55, 74, 1])

        #print("Coarse1_Conv: ", coarse1_conv._shape)
        #print("Coarse1: ", coarse1._shape)
        #print("Coarse2_Conv: ", coarse2_conv._shape)
        #print("Coarse2: ", coarse2._shape)
        #print("Coarse3: ", coarse3._shape)
        #print("Coarse4: ", coarse4._shape)
        #print("Coarse5: ", coarse5._shape)
        #print("Coarse6: ", coarse6._shape)
        #print("Coarse7: ", coarse7._shape)
        #print("Coarse7_output: ", coarse7_output._shape)
    #return coarse7_output, coarse6, coarse5, coarse3
    return coarse7_output
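
globalDepthMap also calls a fullyConnectedLayer helper whose definition is not included here. The following is a minimal sketch under the assumption that it flattens its input and applies weights, bias, and a ReLU; the real helper may handle weight decay or use a different activation.

import tensorflow as tf

def fullyConnectedLayer(scope_name, inputs, shape, bias_shape,
                        reuse=False, trainable=True):
    with tf.variable_scope(scope_name, reuse=reuse):
        # flatten, e.g. [batch, 6, 10, 256] -> [batch, 6*10*256]
        flat = tf.reshape(inputs, [-1, shape[0]])
        weights = tf.get_variable('weights', shape,
                                  initializer=tf.truncated_normal_initializer(stddev=0.01),
                                  trainable=trainable)
        biases = tf.get_variable('biases', bias_shape,
                                 initializer=tf.constant_initializer(0.0),
                                 trainable=trainable)
        return tf.nn.relu(tf.matmul(flat, weights) + biases)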
Example #4
def inference(images, reuse=False, trainable=True):
    coarse1_conv = conv2d('coarse1', images, [11, 11, 3, 96], [96], [1, 4, 4, 1], padding='VALID', reuse=reuse, trainable=trainable)
    coarse1 = tf.nn.max_pool(coarse1_conv, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
    coarse2_conv = conv2d('coarse2', coarse1, [5, 5, 96, 256], [256], [1, 1, 1, 1], padding='VALID', reuse=reuse, trainable=trainable)
    coarse2 = tf.nn.max_pool(coarse2_conv, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    coarse3 = conv2d('coarse3', coarse2, [3, 3, 256, 384], [384], [1, 1, 1, 1], padding='VALID', reuse=reuse, trainable=trainable)
    coarse4 = conv2d('coarse4', coarse3, [3, 3, 384, 384], [384], [1, 1, 1, 1], padding='VALID', reuse=reuse, trainable=trainable)
    coarse5 = conv2d('coarse5', coarse4, [3, 3, 384, 256], [256], [1, 1, 1, 1], padding='VALID', reuse=reuse, trainable=trainable)
    coarse6 = fc('coarse6', coarse5, [6*10*256, 4096], [4096], reuse=reuse, trainable=trainable)
    coarse7 = fc('coarse7', coarse6, [4096, 4070], [4070], reuse=reuse, trainable=trainable)
    coarse7_output = tf.reshape(coarse7, [-1, 55, 74, 1])
    return coarse7_output
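
Example #4 is the coarse network that feeds Example #1's refinement network. A hedged usage sketch of how the two are typically wired together follows; the 228x304 input size is the one that makes the 6*10*256 flatten and the [55, 74, 1] output shapes work out, and the 0.5 keep probability is illustrative only.

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 228, 304, 3], name='images')
keep_conv = tf.placeholder(tf.float32, name='keep_conv')

coarse7_output = inference(images)                           # coarse depth, [batch, 55, 74, 1]
depth = inference_refine(images, coarse7_output, keep_conv)  # refined depth, same shape

# at run time: sess.run(depth, feed_dict={images: batch, keep_conv: 0.5})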
Example #5
def localDepthMap(images,
                  coarse7_output,
                  keep_conv,
                  reuse=False,
                  trainable=True):
    with tf.name_scope("Local_Depth"):
        fine1_conv = conv2d('fine1',
                            images, [9, 9, 3, 63], [63], [1, 2, 2, 1],
                            padding='VALID',
                            reuse=reuse,
                            trainable=trainable)
        fine1 = tf.nn.max_pool(fine1_conv,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='fine_pool1')
        fine1_dropout = tf.nn.dropout(fine1, keep_conv)
        fine2 = tf.concat(axis=3,
                          values=[fine1_dropout, coarse7_output],
                          name="fine2_concat")
        fine3 = conv2d('fine3',
                       fine2, [5, 5, 64, 64], [64], [1, 1, 1, 1],
                       padding='SAME',
                       reuse=reuse,
                       trainable=trainable)
        fine3_dropout = tf.nn.dropout(fine3, keep_conv)
        print("fine3_dropout ", fine3_dropout._shape)
        fine4_conv = conv2d('fine4_conv',
                            fine3_dropout, [5, 5, 64, 1], [1], [1, 1, 1, 1],
                            padding='SAME',
                            reuse=reuse,
                            trainable=trainable)
        print("fine4_conv ", fine4_conv._shape)
        fine4_full = fullyConnectedLayer('fine4_full',
                                         fine4_conv, [55 * 74 * 1, 4070],
                                         [4070],
                                         reuse=reuse,
                                         trainable=trainable)
        print("fine4_full ", fine4_full._shape)
        fine4 = tf.reshape(fine4_full, [-1, 55, 74, 1])

        #print("fine1_conv ", fine1_conv._shape)
        #print("fine1 ", fine1._shape)
        #print("fine1_dropout ", fine1_dropout._shape)
        #print("fine2 ", fine2._shape)
        #print("fine3 ", fine3._shape)
        #print("fine3_dropout ", fine3_dropout._shape)
        #print("fine4 ", fine4._shape)

    #return fine4, fine3_dropout, fine3, fine2, fine1_dropout, fine1, fine1_conv
    return fine4
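
The reuse and trainable flags threaded through globalDepthMap and localDepthMap suggest a two-stage setup in which the global (coarse) network is trained first and then held fixed while the local (refinement) network trains. A hedged sketch of stage two under that assumption:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 228, 304, 3])
keep_conv = tf.placeholder(tf.float32)

coarse = globalDepthMap(images, trainable=False)                    # frozen coarse predictions
refined = localDepthMap(images, coarse, keep_conv, trainable=True)  # only the local net trains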
Example #6
    def inference(self, x, reuse=False, trainable=True):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            print("===D")
            print(x.get_shape())
            # conv2d arguments = (scope_name, inputs, shape, bias_shape, stride, padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False)
            conv_h0, conv_h0_w, conv_h0_b = mp.conv2d(
                'd_conv_h0',
                x, [5, 5, x.get_shape()[-1], self.first_conv_dim],
                [self.first_conv_dim], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            h0 = mp.lrelu(conv_h0)
            print(h0.get_shape())
            # conv2d arguments = (scope_name, inputs, shape, bias_shape, stride, padding='VALID', wd=0.0, reuse=False, trainable=True, with_w=False)
            conv_h1, conv_h1_w, conv_h1_b = mp.conv2d(
                'd_conv_h1',
                h0, [5, 5, h0.get_shape()[-1], self.first_conv_dim * 2],
                [self.first_conv_dim * 2], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            #h1 = mp.lrelu(conv_h1)
            h1 = mp.lrelu(self.d_bn1(conv_h1, trainable=trainable))
            # h1 = mp.lrelu(mp.batch_norm(conv_h1, scope_name='d_bn_h1', reuse=reuse, trainable=trainable))

            # 3rd
            conv_h2, conv_h2_w, conv_h2_b = mp.conv2d(
                'd_conv_h2',
                h1, [5, 5, h1.get_shape()[-1], self.first_conv_dim * 4],
                [self.first_conv_dim * 4], [1, 2, 2, 1],
                padding='SAME',
                reuse=reuse,
                with_w=True,
                trainable=trainable)
            #h2 = mp.lrelu(conv_h2)
            h2 = mp.lrelu(self.d_bn2(conv_h2, trainable=trainable))
            # h2 = mp.lrelu(mp.batch_norm(conv_h2, scope_name='d_bn_h2', reuse=reuse, trainable=trainable))
            print(h2.get_shape())

            # linear projection (a fourth conv layer is skipped; h2 is projected directly to the logit h3)
            h3 = mp.linear_project('d_lin_project_h3',
                                   tf.reshape(h2, [self.batch_size, -1]),
                                   1,
                                   reuse=reuse)
            return tf.nn.sigmoid(h3), h3
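
Example #6 calls self.d_bn1 and self.d_bn2, which look like instances of the DCGAN-style callable batch_norm wrapper. Their definition is not shown; the sketch below is an assumption (the momentum, epsilon, and the use of tf.contrib.layers.batch_norm may differ in the real project).

import tensorflow as tf

class batch_norm(object):
    """Callable batch-norm wrapper, e.g. self.d_bn1 = batch_norm(name='d_bn1')."""

    def __init__(self, epsilon=1e-5, momentum=0.9, name='batch_norm'):
        self.epsilon = epsilon
        self.momentum = momentum
        self.name = name

    def __call__(self, x, train=True, trainable=True):
        # normalize activations; variable reuse is handled by the enclosing scope
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            trainable=trainable,
                                            scope=self.name)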