Example #1
def inference(images, phase_train, scope=''):
    BATCH_SIZE = int(BATCH / NUM_GPU)
    with tf.name_scope(scope, values=[images]):
        #Conv11-32p1
        conv0 = cnv.conv(images,
                         'conv0', [11, 11, 3, 32],
                         stride=[1, 1, 1, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm0 = bn.batch_norm_layer(conv0,
                                     train_phase=phase_train,
                                     scope_bn='BN0')
        relu0 = act.ReLU(bnorm0, 'ReLU0')
        #SKIP CONNECTION 0
        #Conv9-64s2
        conv1 = cnv.conv(relu0,
                         'conv1', [9, 9, 32, 64],
                         stride=[1, 2, 2, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm1 = bn.batch_norm_layer(conv1,
                                     train_phase=phase_train,
                                     scope_bn='BN1')
        relu1 = act.ReLU(bnorm1, 'ReLU1')
        #Conv3-128p1
        conv2 = cnv.conv(relu1,
                         'conv2', [3, 3, 64, 128],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm2 = bn.batch_norm_layer(conv2,
                                     train_phase=phase_train,
                                     scope_bn='BN2')
        relu2 = act.ReLU(bnorm2, 'ReLU2')
        #Conv3-128p1
        conv3 = cnv.conv(relu2,
                         'conv3', [3, 3, 128, 128],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm3 = bn.batch_norm_layer(conv3,
                                     train_phase=phase_train,
                                     scope_bn='BN3')
        relu3 = act.ReLU(bnorm3, 'ReLU3')
        #SKIP CONNECTION 1
        #Conv7-256s2
        conv4 = cnv.conv(relu3,
                         'conv4', [7, 7, 128, 256],
                         stride=[1, 2, 2, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm4 = bn.batch_norm_layer(conv4,
                                     train_phase=phase_train,
                                     scope_bn='BN4')
        relu4 = act.ReLU(bnorm4, 'ReLU4')
        #Conv3-256p1
        conv5 = cnv.conv(relu4,
                         'conv5', [3, 3, 256, 256],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm5 = bn.batch_norm_layer(conv5,
                                     train_phase=phase_train,
                                     scope_bn='BN5')
        relu5 = act.ReLU(bnorm5, 'ReLU5')
        #Conv3-256p1
        conv6 = cnv.conv(relu5,
                         'conv6', [3, 3, 256, 256],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm6 = bn.batch_norm_layer(conv6,
                                     train_phase=phase_train,
                                     scope_bn='BN6')
        relu6 = act.ReLU(bnorm6, 'ReLU6')
        #SKIP CONNECTION 2
        #Conv5-512s2
        conv7_1 = cnv.conv(relu6,
                           'conv7_1', [5, 1, 256, 512],
                           stride=[1, 2, 1, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv7_2 = cnv.conv(conv7_1,
                           'conv7_2', [1, 5, 512, 512],
                           stride=[1, 1, 2, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm7 = bn.batch_norm_layer(conv7_2,
                                     train_phase=phase_train,
                                     scope_bn='BN7')
        relu7 = act.ReLU(bnorm7, 'ReLU7')
        #Conv3-512p1
        conv8_1 = cnv.conv(relu7,
                           'conv8_1', [3, 1, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv8_2 = cnv.conv(conv8_1,
                           'conv8_2', [1, 3, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm8 = bn.batch_norm_layer(conv8_2,
                                     train_phase=phase_train,
                                     scope_bn='BN8')
        relu8 = act.ReLU(bnorm8, 'ReLU8')
        #Conv3-512p1
        conv9_1 = cnv.conv(relu8,
                           'conv9_1', [1, 3, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv9_2 = cnv.conv(conv9_1,
                           'conv9_2', [3, 1, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm9 = bn.batch_norm_layer(conv9_2,
                                     train_phase=phase_train,
                                     scope_bn='BN9')
        relu9 = act.ReLU(bnorm9, 'ReLU9')
        #SKIP CONNECTION 3
        #Conv3-1024s2
        conv10_1 = cnv.conv(relu9,
                            'conv10_1', [3, 1, 512, 1024],
                            stride=[1, 2, 1, 1],
                            padding='SAME',
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        conv10_2 = cnv.conv(conv10_1,
                            'conv10_2', [1, 3, 1024, 1024],
                            stride=[1, 1, 2, 1],
                            padding='SAME',
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        bnorm10 = bn.batch_norm_layer(conv10_2,
                                      train_phase=phase_train,
                                      scope_bn='BN10')
        relu10 = act.ReLU(bnorm10, 'ReLU10')
        #Conv3-1024p1
        conv11_1 = cnv.conv(relu10,
                            'conv11_1', [1, 3, 1024, 1024],
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        conv11_2 = cnv.conv(conv11_1,
                            'conv11_2', [3, 1, 1024, 1024],
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        bnorm11 = bn.batch_norm_layer(conv11_2,
                                      train_phase=phase_train,
                                      scope_bn='BN11')
        relu11 = act.ReLU(bnorm11, 'ReLU11')

        #GO UP
        deconv1 = dcnv.deconv(
            relu11,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 8),
             int(IMAGE_SIZE_W / 8), 512],
            'deconv1', [4, 4, 512, 1024],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        dbnorm1 = bn.batch_norm_layer(deconv1,
                                      train_phase=phase_train,
                                      scope_bn='dBN1')
        drelu1 = act.ReLU(dbnorm1 + relu9, 'dReLU1')

        conv12_1 = cnv.conv(drelu1,
                            'conv12_1', [3, 1, 512, 512],
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        conv12_2 = cnv.conv(conv12_1,
                            'conv12_2', [1, 3, 512, 512],
                            wd=WEIGHT_DECAY,
                            FLOAT16=FLOAT16)
        bnorm12 = bn.batch_norm_layer(conv12_2,
                                      train_phase=phase_train,
                                      scope_bn='BN12')
        relu12 = act.ReLU(bnorm12, 'ReLU12')

        deconv2 = dcnv.deconv(
            relu12,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 4),
             int(IMAGE_SIZE_W / 4), 256],
            'deconv2', [4, 4, 256, 512],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        dbnorm2 = bn.batch_norm_layer(deconv2,
                                      train_phase=phase_train,
                                      scope_bn='dBN2')
        drelu2 = act.ReLU(dbnorm2 + relu6, 'dReLU2')

        conv13 = cnv.conv(drelu2,
                          'conv13', [3, 3, 256, 256],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm13 = bn.batch_norm_layer(conv13,
                                      train_phase=phase_train,
                                      scope_bn='BN13')
        relu13 = act.ReLU(bnorm13, 'ReLU13')

        deconv3 = dcnv.deconv(
            relu13,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 2),
             int(IMAGE_SIZE_W / 2), 128],
            'deconv3', [4, 4, 128, 256],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        dbnorm3 = bn.batch_norm_layer(deconv3,
                                      train_phase=phase_train,
                                      scope_bn='dBN3')
        drelu3 = act.ReLU(dbnorm3 + relu3, 'dReLU3')

        conv14 = cnv.conv(drelu3,
                          'conv14', [3, 3, 128, 128],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm14 = bn.batch_norm_layer(conv14,
                                      train_phase=phase_train,
                                      scope_bn='BN14')
        relu14 = act.ReLU(bnorm14, 'ReLU14')

        deconv4 = dcnv.deconv(
            relu14,
            [BATCH_SIZE, int(IMAGE_SIZE_H),
             int(IMAGE_SIZE_W), 32],
            'deconv4', [4, 4, 32, 128],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        dbnorm4 = bn.batch_norm_layer(deconv4,
                                      train_phase=phase_train,
                                      scope_bn='dBN4')
        drelu4 = act.ReLU(dbnorm4 + relu0, 'dReLU4')

        conv_last = cnv.conv(drelu4,
                             'conv_last', [3, 3, 32, 32],
                             wd=WEIGHT_DECAY,
                             FLOAT16=FLOAT16)
        bnorm_last = bn.batch_norm_layer(conv_last,
                                         train_phase=phase_train,
                                         scope_bn='BNl')
        relu_last = act.ReLU(bnorm_last, 'ReLU_last')

        scores = cnv.conv(relu_last,
                          'scores', [3, 3, 32, 1],
                          wd=0,
                          FLOAT16=FLOAT16)
        tf.summary.image('output', scores)

        return scores
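
inference leans on module-level helpers (cnv, bn, act, dcnv) and constants (BATCH, NUM_GPU, IMAGE_SIZE_H, IMAGE_SIZE_W, WEIGHT_DECAY, FLOAT16) that live outside this snippet. A minimal driver sketch follows, with hypothetical values standing in for those constants; the encoder downsamples by a factor of 16 before the deconvolutions restore full resolution, so both image dimensions must be divisible by 16.

import numpy as np
import tensorflow as tf

BATCH, NUM_GPU = 8, 1                  # hypothetical values
IMAGE_SIZE_H, IMAGE_SIZE_W = 192, 256  # must be divisible by 16

images = tf.placeholder(
    tf.float32, [BATCH // NUM_GPU, IMAGE_SIZE_H, IMAGE_SIZE_W, 3])
phase_train = tf.placeholder(tf.bool, name='phase_train')
scores = inference(images, phase_train)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.zeros((BATCH // NUM_GPU, IMAGE_SIZE_H, IMAGE_SIZE_W, 3),
                     np.float32)
    out = sess.run(scores, feed_dict={images: batch, phase_train: False})
    print(out.shape)  # (8, 192, 256, 1)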
Example #2
def timing(deconv):
    deconv(1, 1, 0, 1, 1, 0)
    deconv(1, 1, 0, 1, 1, 180)
    deconv(1, 1, 0, 1, 1, 90)
    deconv(1, 1, 0, 1, 1, 45)

    deconv(2., 2., 0, 1, 1, 0)
    deconv(2., 2., 0, 1, 1, 180)
    deconv(2., 2., 0, 1, 1, 90)
    deconv(2., 2., 0, 1, 1, 45)

    deconv(2.7, 1.7, 0, 1, 1, 0)
    deconv(2.7, 1.7, 0, 1, 1, 180)
    deconv(2.7, 1.7, 0, 1, 1, 90)
    deconv(2.7, 1.7, 0, 1, 1, 45)

    deconv(2.7, 1.7, 10, 1, 1, 0)
    deconv(2.7, 1.7, 20, 1, 1, 180)
    deconv(2.7, 1.7, 30, 1, 1, 90)
    deconv(2.7, 1.7, 40, 1, 1, 45)
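
timing only enumerates a fixed sweep of argument combinations; the measurement itself is left to the caller. One hedged sketch of such a harness (benchmark and repeats are hypothetical names, not part of the source):

import time

def benchmark(deconv, repeats=1000):
    # Run the full sweep `repeats` times and report the mean
    # wall-clock time per sweep.
    start = time.perf_counter()
    for _ in range(repeats):
        timing(deconv)
    print('%.6f s per sweep' % ((time.perf_counter() - start) / repeats))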
Example #3
def inference(images, phase_train, scope='CNN'):

    with tf.name_scope(scope, values=[images]):

        #THE DEPTH NETWORK
        #Layer 1: Output Size 192x256x32
        conv1 = cnv.conv(images,
                         'conv1', [11, 11, 3 * sq, 32],
                         stride=[1, 1, 1, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm1 = bn.batch_norm_layer(conv1,
                                     train_phase=phase_train,
                                     scope_bn='BN1')
        relu1 = ops.leaky_relu(input=bnorm1, leak=0.1)
        #SKIP CONNECTION 0

        #Layer 2 - Downsample:Output Size 96x128x64
        conv2 = cnv.conv(relu1,
                         'conv2', [9, 9, 32, 64],
                         stride=[1, 2, 2, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm2 = bn.batch_norm_layer(conv2,
                                     train_phase=phase_train,
                                     scope_bn='BN2')
        relu2 = ops.leaky_relu(input=bnorm2, leak=0.1)

        #Layer 3:Output Size 96x128x64
        conv3 = cnv.conv(relu2,
                         'conv3', [3, 3, 64, 64],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm3 = bn.batch_norm_layer(conv3,
                                     train_phase=phase_train,
                                     scope_bn='BN3')
        relu3 = ops.leaky_relu(input=bnorm3, leak=0.1)
        #SKIP CONNECTION 1

        #Layer 4 - Downsample:Output Size 48x64x128
        conv4 = cnv.conv(relu3,
                         'conv4', [7, 7, 64, 128],
                         stride=[1, 2, 2, 1],
                         padding='SAME',
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm4 = bn.batch_norm_layer(conv4,
                                     train_phase=phase_train,
                                     scope_bn='BN4')
        relu4 = ops.leaky_relu(input=bnorm4, leak=0.1)

        #Layer 5:Output Size 48x64x128
        conv5 = cnv.conv(relu4,
                         'conv5', [3, 3, 128, 128],
                         wd=WEIGHT_DECAY,
                         FLOAT16=FLOAT16)
        bnorm5 = bn.batch_norm_layer(conv5,
                                     train_phase=phase_train,
                                     scope_bn='BN5')
        relu5 = ops.leaky_relu(input=bnorm5, leak=0.1)
        #SKIP CONNECTION 2

        #Layer 6 Downsample:Output Size 24x32x256
        conv6_1 = cnv.conv(relu5,
                           'conv6_1', [5, 1, 128, 256],
                           stride=[1, 2, 1, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv6_2 = cnv.conv(conv6_1,
                           'conv6_2', [1, 5, 256, 256],
                           stride=[1, 1, 2, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm6 = bn.batch_norm_layer(conv6_2,
                                     train_phase=phase_train,
                                     scope_bn='BN6')
        relu6 = ops.leaky_relu(input=bnorm6, leak=0.1)

        #Layer 7:Output Size 24x32x256
        conv7_1 = cnv.conv(relu6,
                           'conv7_1', [3, 1, 256, 256],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv7_2 = cnv.conv(conv7_1,
                           'conv7_2', [1, 3, 256, 256],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm7 = bn.batch_norm_layer(conv7_2,
                                     train_phase=phase_train,
                                     scope_bn='BN7')
        relu7 = ops.leaky_relu(input=bnorm7, leak=0.1)
        #SKIP CONNECTION 3

        #Layer 8 Downsample:Output Size 12x16x512
        conv8_1 = cnv.conv(relu7,
                           'conv8_1', [3, 1, 256, 512],
                           stride=[1, 2, 1, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv8_2 = cnv.conv(conv8_1,
                           'conv8_2', [1, 3, 512, 512],
                           stride=[1, 1, 2, 1],
                           padding='SAME',
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm8 = bn.batch_norm_layer(conv8_2,
                                     train_phase=phase_train,
                                     scope_bn='BN8')
        relu8 = ops.leaky_relu(input=bnorm8, leak=0.1)

        #Layer 9:Output Size 12x16x512
        conv9_1 = cnv.conv(relu8,
                           'conv9_1', [1, 3, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        conv9_2 = cnv.conv(conv9_1,
                           'conv9_2', [3, 1, 512, 512],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        bnorm9 = bn.batch_norm_layer(conv9_2,
                                     train_phase=phase_train,
                                     scope_bn='BN9')
        relu9 = ops.leaky_relu(input=bnorm9, leak=0.1)

        #GO UP
        #Layer 10 UP 1:Output Size 24x32x256
        conv10 = dcnv.deconv(
            relu9,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 8),
             int(IMAGE_SIZE_W / 8), 256],
            'deconv1', [4, 4, 256, 512],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        bnorm10 = bn.batch_norm_layer(conv10,
                                      train_phase=phase_train,
                                      scope_bn='BN10')
        relu10 = ops.leaky_relu(input=bnorm10, leak=0.1)

        #Layer 11 UP 1:Output 24x32x256
        conv11 = cnv.conv(relu10 + relu7,
                          'conv11', [3, 3, 256, 256],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm11 = bn.batch_norm_layer(conv11,
                                      train_phase=phase_train,
                                      scope_bn='BN11')
        relu11 = ops.leaky_relu(input=bnorm11, leak=0.1)

        #Layer 12 UP 2:Output Size 48x64x128
        conv12 = dcnv.deconv(
            relu11,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 4),
             int(IMAGE_SIZE_W / 4), 128],
            'deconv2', [4, 4, 128, 256],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        bnorm12 = bn.batch_norm_layer(conv12,
                                      train_phase=phase_train,
                                      scope_bn='BN12')
        relu12 = ops.leaky_relu(input=bnorm12, leak=0.1)

        #Layer 13 UP 2:Output Size 48x64x128
        conv13 = cnv.conv(relu12 + relu5,
                          'conv13', [3, 3, 128, 128],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm13 = bn.batch_norm_layer(conv13,
                                      train_phase=phase_train,
                                      scope_bn='BN13')
        relu13 = ops.leaky_relu(input=bnorm13, leak=0.1)

        #Layer 14 UP 3:Output Size 96x128x64
        conv14 = dcnv.deconv(
            relu13,
            [BATCH_SIZE,
             int(IMAGE_SIZE_H / 2),
             int(IMAGE_SIZE_W / 2), 64],
            'deconv3', [4, 4, 64, 128],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        bnorm14 = bn.batch_norm_layer(conv14,
                                      train_phase=phase_train,
                                      scope_bn='BN14')
        relu14 = ops.leaky_relu(input=bnorm14, leak=0.1)

        #Layer 15 UP 3:Output Size 96x128x64
        conv15 = cnv.conv(relu14 + relu3,
                          'conv15', [3, 3, 64, 64],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm15 = bn.batch_norm_layer(conv15,
                                      train_phase=phase_train,
                                      scope_bn='BN15')
        relu15 = ops.leaky_relu(input=bnorm15, leak=0.1)

        #Layer 16 UP 4:Output Size 192x256x32
        conv16 = dcnv.deconv(
            relu15,
            [BATCH_SIZE, int(IMAGE_SIZE_H),
             int(IMAGE_SIZE_W), 32],
            'deconv4', [4, 4, 32, 64],
            stride=[1, 2, 2, 1],
            padding='SAME',
            wd=WEIGHT_DECAY,
            FLOAT16=FLOAT16)
        bnorm16 = bn.batch_norm_layer(conv16,
                                      train_phase=phase_train,
                                      scope_bn='BN16')
        relu16 = ops.leaky_relu(input=bnorm16, leak=0.1)

        #Layer 17:Output Size 192x256x32
        conv17 = cnv.conv(relu16 + relu1,
                          'conv17', [3, 3, 32, 32],
                          wd=WEIGHT_DECAY,
                          FLOAT16=FLOAT16)
        bnorm17 = bn.batch_norm_layer(conv17,
                                      train_phase=phase_train,
                                      scope_bn='BN17')
        relu17 = ops.leaky_relu(input=bnorm17, leak=0.1)

        #Layer 18:Output Size 192x256x2 - 1 depth image
        depth = cnv.conv(relu17,
                         'scores', [3, 3, 32, 1],
                         wd=0,
                         FLOAT16=FLOAT16)

        #MOTION NETWORK
        conv_tr = cnv.conv(relu9,
                           'conv_transform', [3, 3, 512, 128],
                           wd=WEIGHT_DECAY,
                           FLOAT16=FLOAT16)
        relu_tr = ops.leaky_relu(input=conv_tr, leak=0.1)
        #fc1
        fc1 = cnv.fclayer(relu_tr, BATCH_SIZE, 1024, "fc1", wd=WEIGHT_DECAY)
        fc1_relu = ops.leaky_relu(input=fc1, leak=0.1)
        #fc2
        fc2 = cnv.fclayer(fc1_relu, BATCH_SIZE, 128, "fc2", wd=WEIGHT_DECAY)
        fc2_relu = ops.leaky_relu(input=fc2, leak=0.1)

        #fc3
        #transforms
        transforms = cnv.fclayer(fc2_relu,
                                 BATCH_SIZE,
                                 sq * 12,
                                 "transforms",
                                 wd=WEIGHT_DECAY)

        return depth, transforms
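
The motion head emits sq * 12 values per example, which is consistent with one 3x4 rigid-body transform (rotation plus translation) per stacked input frame, though the source does not confirm that packing. Under that assumption, a reshape sketch:

import tensorflow as tf

def transforms_to_matrices(transforms, batch_size, sq):
    # [batch, sq * 12] -> [batch, sq, 3, 4]: one 3x4 (rotation |
    # translation) matrix per input frame, assuming row-major packing.
    return tf.reshape(transforms, [batch_size, sq, 3, 4])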
Example #4
def correctness(deconv):

    print(deconv(1, 1, 0, 1, 1, 0))
    print(deconv(1, 1, 0, 1, 1, 180))
    print(deconv(1, 1, 0, 1, 1, 90))
    print(deconv(1, 1, 0, 1, 1, 45))

    print(deconv(2., 2., 0, 1, 1, 0))
    print(deconv(2., 2., 0, 1, 1, 180))
    print(deconv(2., 2., 0, 1, 1, 90))
    print(deconv(2., 2., 0, 1, 1, 45))

    print(deconv(2.7, 1.7, 0, 1, 1, 0))
    print(deconv(2.7, 1.7, 0, 1, 1, 180))
    print(deconv(2.7, 1.7, 0, 1, 1, 90))
    print(deconv(2.7, 1.7, 0, 1, 1, 45))

    print(deconv(2.7, 1.7, 10, 1, 1, 0))
    print(deconv(2.7, 1.7, 20, 1, 1, 180))
    print(deconv(2.7, 1.7, 30, 1, 1, 90))
    print(deconv(2.7, 1.7, 40, 1, 1, 45))
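
The six-argument calls, with angles of 0, 45, 90, and 180, resemble deconvolving one elliptical Gaussian from another (for example, removing a beam from a fitted source). Purely as an illustration, here is a sketch of that classical computation, assuming the argument order (maj1, min1, pa1, maj2, min2, pa2) with position angles in degrees; sign conventions for the returned angle vary.

import math

def gaussian_deconv(maj1, min1, pa1, maj2, min2, pa2):
    # Hypothetical stand-in for the deconv under test: deconvolve
    # elliptical Gaussian 2 from Gaussian 1 and return the residual
    # Gaussian's (major, minor, pa). Angles are in degrees.
    t1, t2 = math.radians(pa1), math.radians(pa2)
    alpha = ((maj1 * math.cos(t1)) ** 2 + (min1 * math.sin(t1)) ** 2
             - (maj2 * math.cos(t2)) ** 2 - (min2 * math.sin(t2)) ** 2)
    beta = ((maj1 * math.sin(t1)) ** 2 + (min1 * math.cos(t1)) ** 2
            - (maj2 * math.sin(t2)) ** 2 - (min2 * math.cos(t2)) ** 2)
    gamma = 2 * ((min1 ** 2 - maj1 ** 2) * math.sin(t1) * math.cos(t1)
                 - (min2 ** 2 - maj2 ** 2) * math.sin(t2) * math.cos(t2))
    s = alpha + beta
    t = math.sqrt((alpha - beta) ** 2 + gamma ** 2)
    if s < t - 1e-12:  # Gaussian 2 does not fit inside Gaussian 1
        raise ValueError('no real deconvolution solution')
    maj = math.sqrt(0.5 * (s + t))
    minor = math.sqrt(max(0.0, 0.5 * (s - t)))
    pa = 0.5 * math.degrees(math.atan2(-gamma, alpha - beta))
    return maj, minor, pa

correctness(gaussian_deconv)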