Example #1
import tensorflow as tf

import model_base  # project-local module with the conv/fc fire building blocks

def inference(
    images, **kwargs
):  #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]
    modelShape = kwargs.get('modelShape')
    wd = None  #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    batchSize = kwargs.get('activeBatchSize', None)

    ############# CONV1_TWIN 3x3 conv, 2 input dims, 2 parallel modules, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_inception_module(
        'conv1', images, kwargs.get('imageDepthChannels'), {
            'cnn1x1': modelShape[0],
            'cnn3x3': modelShape[0],
            'cnn5x5': modelShape[0]
        }, wd, **kwargs)
    # calc batch norm CONV1_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV2_TWIN
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module(
        'conv2', fireOut, prevExpandDim, {'cnn1x1': modelShape[1]}, wd,
        **kwargs)
    # calc batch norm CONV2_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling1 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool1')
    ############# CONV3_TWIN
    fireOut, prevExpandDim = model_base.conv_fire_parallel_inception_module(
        'conv3', pool, prevExpandDim, {
            'cnn1x1': modelShape[2],
            'cnn3x3': modelShape[2],
            'cnn5x5': modelShape[2]
        }, wd, **kwargs)
    # calc batch norm CONV3_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV4_TWIN
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module(
        'conv4', fireOut, prevExpandDim, {'cnn1x1': modelShape[3]}, wd,
        **kwargs)
    # calc batch norm CONV4_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling2 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool2')
    ############# CONV5
    fireOut, prevExpandDim = model_base.conv_fire_inception_module(
        'conv5', pool, prevExpandDim, {
            'cnn1x1': modelShape[4],
            'cnn3x3': modelShape[4],
            'cnn5x5': modelShape[4]
        }, wd, **kwargs)
    # calc batch norm CONV5
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV6
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv6', fireOut, prevExpandDim, {'cnn1x1': modelShape[5]}, wd,
        **kwargs)
    # calc batch norm CONV6
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling3 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool3')
    ############# CONV7
    fireOut, prevExpandDim = model_base.conv_fire_inception_module(
        'conv7', pool, prevExpandDim, {
            'cnn1x1': modelShape[6],
            'cnn3x3': modelShape[6],
            'cnn5x5': modelShape[6]
        }, wd, **kwargs)
    # calc batch norm CONV7
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV8
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv8', fireOut, prevExpandDim, {'cnn1x1': modelShape[7]}, wd,
        **kwargs)
    # calc batch norm CONV8
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### DROPOUT after CONV8
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate')
                               if kwargs.get('phase') == 'train' else 1.0,
                               dtype=dtype)
        fireOut = tf.nn.dropout(fireOut, keepProb, name="dropout")
    ###### Prepare for fully connected layers
    # Reshape fireOut - flatten
    prevExpandDim = (kwargs.get('imageDepthRows') //
                     (2 * 2 * 2)) * (kwargs.get('imageDepthCols') //
                                     (2 * 2 * 2)) * prevExpandDim
    fireOut = tf.reshape(fireOut, [batchSize, -1])
    ############# FC1 layer with 1024 outputs
    fireOut, prevExpandDim = model_base.fc_fire_module('fc1', fireOut,
                                                       prevExpandDim,
                                                       {'fc': modelShape[8]},
                                                       wd, **kwargs)
    # calc batch norm FC1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# FC2O layer with 1024 outputs - Orientation ##########################
    fireOutO, prevExpandDimO = model_base.fc_fire_module(
        'fc2O', fireOut, prevExpandDim, {'fc': modelShape[9]}, wd, **kwargs)
    # calc batch norm FC2O - Orientation
    if kwargs.get('batchNorm'):
        fireOutO = model_base.batch_norm('batch_norm', fireOutO, dtype)
    ############# FC3O layer with 3 outputs - Orientation
    fireOutO, prevExpandDimO = model_base.fc_regression_module(
        'fc3O', fireOutO, prevExpandDimO,
        {'fc': kwargs.get('networkOutputSize') // 2}, wd, **kwargs)
    ############# FC2T layer with 1024 outputs - Translation ##########################
    fireOutT, prevExpandDimT = model_base.fc_fire_module(
        'fc2T', fireOut, prevExpandDim, {'fc': modelShape[9]}, wd, **kwargs)
    # calc batch norm FC2T - Translation
    if kwargs.get('batchNorm'):
        fireOutT = model_base.batch_norm('batch_norm', fireOutT, dtype)
    ############# FC3T layer with 3 outputs - Translation
    fireOutT, prevExpandDimT = model_base.fc_regression_module(
        'fc3T', fireOutT, prevExpandDimT,
        {'fc': kwargs.get('networkOutputSize') // 2}, wd, **kwargs)
    ####################################################
    # put together orientation and translation
    fireOut = tf.concat([fireOutO, fireOutT], axis=1)
    return fireOut
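
The orientation and translation heads are fused with tf.concat before returning, so callers have to undo that split. A minimal sketch of that bookkeeping, assuming (as the two fc3 heads above imply) that networkOutputSize is even and the first half of the fused vector is orientation:

def split_pose_output(fireOut, networkOutputSize):
    # Mirrors the concat order above: [orientation | translation].
    half = networkOutputSize // 2
    return fireOut[:, :half], fireOut[:, half:]
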
Example #2
import numpy as np  # referenced only by the commented-out reshape near the end
import tensorflow as tf

import model_base  # project-local module with the conv/fc fire building blocks

def inference(
    images, **kwargs
):  #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]
    modelShape = kwargs.get('modelShape')
    wd = None  #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    batchSize = kwargs.get('activeBatchSize', None)

    ############# CONV1 3x3 conv, 2 input dims, 2 parallel modules, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_inception_module(
        'conv1', images, kwargs.get('pngChannels'), {
            'cnn1x1': modelShape[0],
            'cnn3x3': modelShape[0],
            'cnn5x5': modelShape[0]
        }, wd, **kwargs)
    # calc batch norm CONV1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm1', fireOut, dtype)
    ############# CONV2
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv2', fireOut, prevExpandDim, {'cnn1x1': modelShape[1]}, wd,
        **kwargs)
    # calc batch norm CONV2
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm2', fireOut, dtype)
    ###### Pooling1 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool1')
    ############# CONV3
    fireOut, prevExpandDim = model_base.conv_fire_inception_module(
        'conv3', pool, prevExpandDim, {
            'cnn1x1': modelShape[2],
            'cnn3x3': modelShape[2],
            'cnn5x5': modelShape[2]
        }, wd, **kwargs)
    # calc batch norm CONV3
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm3', fireOut, dtype)
    ############# CONV4
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv4', fireOut, prevExpandDim, {'cnn1x1': modelShape[3]}, wd,
        **kwargs)
    # calc batch norm CONV4
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm4', fireOut, dtype)
    ###### Pooling2 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool2')
    ############# CONV5
    fireOut, prevExpandDim = model_base.conv_fire_inception_module(
        'conv5', pool, prevExpandDim, {
            'cnn1x1': modelShape[4],
            'cnn3x3': modelShape[4],
            'cnn5x5': modelShape[4]
        }, wd, **kwargs)
    # calc batch norm CONV5
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm5', fireOut, dtype)
    ############# CONV6
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv6', fireOut, prevExpandDim, {'cnn1x1': modelShape[5]}, wd,
        **kwargs)
    # calc batch norm CONV6
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm6', fireOut, dtype)
    ###### Pooling3 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool3')
    ############# CONV7
    fireOut, prevExpandDim = model_base.conv_fire_inception_module(
        'conv7', pool, prevExpandDim, {
            'cnn1x1': modelShape[6],
            'cnn3x3': modelShape[6],
            'cnn5x5': modelShape[6]
        }, wd, **kwargs)
    # calc batch norm CONV7
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm7', fireOut, dtype)
    ############# CONV8
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv8', fireOut, prevExpandDim, {'cnn1x1': modelShape[7]}, wd,
        **kwargs)
    # calc batch norm CONV8
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm8', fireOut, dtype)
    ###### DROPOUT after CONV8
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate')
                               if kwargs.get('phase') == 'train' else 1.0,
                               dtype=dtype)
        fireOut = tf.nn.dropout(fireOut, keepProb, name="dropout")
    ###### Prepare for fully connected layers
    # Reshape fireOut - flatten
    prevExpandDim = (kwargs.get('pngRows') //
                     (2 * 2 * 2)) * (kwargs.get('pngCols') //
                                     (2 * 2 * 2)) * prevExpandDim
    fireOutFlat = tf.reshape(fireOut, [batchSize, -1])

    ############# FC1 layer with 1024 outputs
    fireOut, prevExpandDim = model_base.fc_fire_module('fc1', fireOutFlat,
                                                       prevExpandDim,
                                                       {'fc': modelShape[8]},
                                                       wd, **kwargs)
    # calc batch norm FC1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm9', fireOut, dtype)
    ############# FC2 layer with 8 outputs
    fireOut, prevExpandDim = model_base.fc_regression_module(
        'fc2', fireOut, prevExpandDim, {'fc': kwargs.get('networkOutputSize')},
        wd, **kwargs)

    ###### Normalize vectors to have output [0~1] for each batch
    # fireOut is [16] x [192]
    # fireOut should be [16] x [6] x [32] x [1] => now normalize for each batch and each row
    # To do so, we could rearrange everything into [16 x 6] x [32], calculate softmax for each row, and reshape back to the original layout
    # kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize') = kwargs.get('classificationModel')['binSize']
    #fireOut = tf.reshape(fireOut, [kwargs.get('activeBatchSize')*kwargs.get('logicalOutputSize'), np.int32(kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize'))])
    #fireOut.set_shape([kwargs.get('activeBatchSize')*kwargs.get('logicalOutputSize'), np.int32(kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize'))])
    #fireOut = tf.nn.softmax(fireOut)

    #### NOW CONVERT IT TO Correct format
    # kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize') = kwargs.get('classificationModel')['binSize']

    #fireOut = tf.reshape(fireOut, [kwargs.get('activeBatchSize'),
    #                                   kwargs.get('logicalOutputSize'),
    #                                   np.int32(kwargs.get('networkOutputSize')/(kwargs.get('logicalOutputSize'))),
    #                                   1])
    #fireOut.set_shape([kwargs.get('activeBatchSize'),
    #                       kwargs.get('logicalOutputSize'),
    #                       np.int32(kwargs.get('networkOutputSize')/(kwargs.get('logicalOutputSize'))),
    #                       1])

    return fireOut
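
The commented-out block above sketches normalizing each group of bins to [0~1] with a softmax. A self-contained sketch of that reshape-softmax-reshape, assuming networkOutputSize = logicalOutputSize * binSize as those comments state:

import tensorflow as tf

def per_bin_softmax(fireOut, activeBatchSize, logicalOutputSize, networkOutputSize):
    # Rearrange [batch, networkOutputSize] into one row per logical output,
    # softmax each row of binSize scores, then restore the 4-D layout.
    binSize = networkOutputSize // logicalOutputSize
    rows = tf.reshape(fireOut, [activeBatchSize * logicalOutputSize, binSize])
    rows = tf.nn.softmax(rows)
    return tf.reshape(rows, [activeBatchSize, logicalOutputSize, binSize, 1])
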
Example #3
import tensorflow as tf

import model_base  # project-local module with the conv/fc fire building blocks
# _shortcut is a project-local helper not shown here; a hypothetical sketch follows this example

def inference(
    images, **kwargs
):  #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]
    modelShape = kwargs.get('modelShape')
    wd = None  #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    batchSize = kwargs.get('activeBatchSize', None)
    ############# CONV1_TWIN 3x3 conv, 2 input dims, 2 parallel modules, 64 output dims (filters)
    fireOutSct, prevExpandDimSct = model_base.conv_fire_parallel_inception_module(
        'conv1', images, int(images.get_shape()[3]), {'cnn3x3': modelShape[0]},
        wd, **kwargs)
    # calc batch norm CONV1_TWIN
    if kwargs.get('batchNorm'):
        fireOutSct = model_base.batch_norm('batch_norm', fireOutSct, dtype)
    ###### Pooling1 2x2 with stride 2 for shortcut
    poolSct = tf.nn.max_pool(fireOutSct,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='maxpoolSct1')
    ############# CONV2_TWIN 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_inception_module(
        'conv2', fireOutSct, prevExpandDimSct, {'cnn3x3': modelShape[1]}, wd,
        **kwargs)
    # calc batch norm CONV2_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling1 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool1')
    ############# CONV3_TWIN 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOutSct, prevExpandDimSct = model_base.conv_fire_parallel_inception_module(
        'conv3', pool, prevExpandDim, {'cnn3x3': modelShape[2]}, wd, **kwargs)
    # calc batch norm CONV3_TWIN
    if kwargs.get('batchNorm'):
        fireOutSct = model_base.batch_norm('batch_norm', fireOutSct, dtype)
    ########################## connect shortcut
    fireOutSct, prevExpandDimSct = _shortcut(fireOutSct, poolSct,
                                             kwargs.get('numParallelModules'))
    ###### Pooling2 2x2 with stride 2 for shortcut
    poolSct = tf.nn.max_pool(fireOutSct,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='maxpoolSct2')

    ############# CONV4_TWIN 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_inception_module(
        'conv4', fireOutSct, prevExpandDimSct, {'cnn3x3': modelShape[3]}, wd,
        **kwargs)
    # calc batch norm CONV4_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling2 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool2')
    ############# CONV5 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOutSct, prevExpandDimSct = model_base.conv_fire_parallel_inception_module(
        'conv5', pool, prevExpandDim, {'cnn3x3': modelShape[4]}, wd, **kwargs)
    # calc batch norm CONV5
    if kwargs.get('batchNorm'):
        fireOutSct = model_base.batch_norm('batch_norm', fireOutSct, dtype)
    ########################## connect shortcut
    fireOutSct, prevExpandDimSct = _shortcut(fireOutSct, poolSct,
                                             kwargs.get('numParallelModules'))
    ###### Pooling3 2x2 with stride 2 for shortcut
    poolSct = tf.nn.max_pool(fireOutSct,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='maxpoolSct3')

    ############# CONV6 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_inception_module(
        'conv6', fireOutSct, prevExpandDimSct, {'cnn3x3': modelShape[5]}, wd,
        **kwargs)
    # calc batch norm CONV6
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling3 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='maxpool3')
    ############# CONV7 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOutSct, prevExpandDimSct = model_base.conv_fire_parallel_inception_module(
        'conv7', pool, prevExpandDim, {'cnn3x3': modelShape[6]}, wd, **kwargs)
    # calc batch norm CONV7
    if kwargs.get('batchNorm'):
        fireOutSct = model_base.batch_norm('batch_norm', fireOutSct, dtype)
    ########################## connect shortcut
    fireOutSct, prevExpandDimSct = _shortcut(fireOutSct, poolSct,
                                             kwargs.get('numParallelModules'))

    ############# CONV8 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_inception_module(
        'conv8', fireOutSct, prevExpandDimSct, {'cnn3x3': modelShape[7]}, wd,
        **kwargs)
    # calc batch norm CONV8
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### DROPOUT after CONV8
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate')
                               if kwargs.get('phase') == 'train' else 1.0,
                               dtype=dtype)
        fireOut = tf.nn.dropout(fireOut, keepProb, name="dropout")
    ###### Prepare for fully connected layers

    # Reshape fireOut - flatten
    # prevExpandDim = (kwargs.get('imageDepthRows')//(2*2*2))*(kwargs.get('imageDepthCols')//(2*2*2))*prevExpandDim
    # fireOutFlat = tf.reshape(fireOut, [batchSize, -1])
    #########ALTERNATIVE
    numParallelModules = kwargs.get('numParallelModules')  # 2
    # Twin network -> numParallelModules = 2
    # Split tensor through last dimension into numParallelModules tensors
    prevLayerIndivDims = prevExpandDim // numParallelModules
    prevExpandDim = int(fireOut.get_shape()[1]) * int(
        fireOut.get_shape()[2]) * prevLayerIndivDims
    fireOut = tf.split(fireOut, numParallelModules, axis=3)
    for prl in range(numParallelModules):
        fireOutFlatPrl = tf.reshape(fireOut[prl], [batchSize, -1])
        if prl == 0:
            fireOutFlat = fireOutFlatPrl
        else:
            fireOutFlat = tf.concat([fireOutFlat, fireOutFlatPrl], axis=1)

    #########TO BE REMOVED AND FIXED INSIDE FC_FIRE_PARALLEL MODULE BY SIMPLY CHANGING SPLIT AXIS TO 1
    ############# Parallel FC layer with 1024 outputs
    fireOut, prevExpandDim = model_base.fc_fire_parallel_module(
        'pfc1', fireOutFlat, prevExpandDim, {'pfc': modelShape[8]}, wd,
        **kwargs)
    # calc batch norm FC1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# FC2 layer with 8 outputs
    fireOut, prevExpandDim = model_base.fc_regression_module(
        'fc1', fireOut, prevExpandDim, {'fc': kwargs.get('networkOutputSize')},
        wd, **kwargs)
    return fireOut
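
The _shortcut helper is never shown in these excerpts. A purely hypothetical reconstruction, assuming it implements a residual-style elementwise add that keeps the parallel branches paired up (this only works when the two tensors have matching channel counts; the real helper may instead concatenate or project):

import tensorflow as tf

def _shortcut(fireOut, shortcut, numParallelModules):
    # Hypothetical: split both tensors into their parallel branches along the
    # channel axis, add matching branches, and concatenate them back together.
    outs = tf.split(fireOut, numParallelModules, axis=3)
    scts = tf.split(shortcut, numParallelModules, axis=3)
    merged = tf.concat([o + s for o, s in zip(outs, scts)], axis=3)
    return merged, int(merged.get_shape()[3])
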
Example #4
import tensorflow as tf

import model_base  # project-local module with the conv/fc fire building blocks

def inference(images, **kwargs): #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]

    modelShape = kwargs.get('modelShape')
    wd = None #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    batchSize = kwargs.get('activeBatchSize', None)

    ############# CONV1 3x3 conv, 2 input dims, 2 parallel modules, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv1', images, kwargs.get('imageChannels'),
                                              {'cnn3x3': modelShape[0]},
                                              wd, **kwargs)
    # calc batch norm CONV1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV2 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv2', fireOut, prevExpandDim,
                                              {'cnn3x3': modelShape[1]},
                                              wd, **kwargs)
    # calc batch norm CONV2
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling1 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool1')
    ############# CONV3 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv3', pool, prevExpandDim,
                                              {'cnn3x3': modelShape[2]},
                                              wd, **kwargs)
    # calc batch norm CONV3
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV4 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv4', fireOut, prevExpandDim,
                                              {'cnn3x3': modelShape[3]},
                                              wd, **kwargs)
    # calc batch norm CONV4
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling2 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool2')
    ############# CONV5 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv5', pool, prevExpandDim,
                                              {'cnn3x3': modelShape[4]},
                                              wd, **kwargs)
    # calc batch norm CONV5
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV6 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv6', fireOut, prevExpandDim,
                                              {'cnn3x3': modelShape[5]},
                                              wd, **kwargs)
    # calc batch norm CONV6
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling3 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool3')
    ############# CONV7 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv7', pool, prevExpandDim,
                                              {'cnn3x3': modelShape[6]},
                                              wd, **kwargs)
    # calc batch norm CONV7
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV8 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv8', fireOut, prevExpandDim,
                                              {'cnn3x3': modelShape[7]},
                                              wd, **kwargs)
    # calc batch norm CONV8
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### DROPOUT after CONV8
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate') if kwargs.get('phase')=='train' else 1.0, dtype=dtype)
        fireOut = tf.nn.dropout(fireOut, keepProb, name="dropout")
    ###### Prepare for fully connected layers
    # Reshape fireOut - flatten
    prevExpandDim = (kwargs.get('imageHeight')//(2*2*2))*(kwargs.get('imageWidth')//(2*2*2))*prevExpandDim
    fireOutFlat = tf.reshape(fireOut, [batchSize, -1])
    
    ############# FC1 layer with 1024 outputs
    fireOut, prevExpandDim = model_base.fc_fire_module('fc1', fireOutFlat, prevExpandDim,
                                            {'fc': 1024},
                                            wd, **kwargs)
    # calc batch norm FC1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# FC2 layer with 8 outputs
    fireOut, prevExpandDim = model_base.fc_regression_module('fc2', fireOut, prevExpandDim,
                                            {'fc': kwargs.get('outputSize')},
                                            wd, **kwargs)

    return fireOut
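
The flattened width computed before fc1 is just the post-pooling spatial dims times the channel count: each of the three 2x2, stride-2 max-pools halves height and width, hence the // (2 * 2 * 2). A quick sanity check with illustrative (assumed) dimensions:

# 240x320 input with 64 channels after conv8 -- illustrative numbers only.
imageHeight, imageWidth, channels = 240, 320, 64
flatDim = (imageHeight // (2 * 2 * 2)) * (imageWidth // (2 * 2 * 2)) * channels
assert flatDim == 30 * 40 * 64  # == 76800
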
Example #5
import tensorflow as tf

import model_base  # project-local module with the conv/fc fire building blocks

def inference(
    images, **kwargs
):  #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]

    modelShape = kwargs.get('modelShape')
    wd = None  #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    existingParams = kwargs.get('existingParams')

    batchSize = kwargs.get('activeBatchSize', None)

    ############# CONV1_TWIN 3x3 conv, 2 input dims, 2 parallel modules, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module(
        'conv1', images, kwargs.get('imageChannels'),
        {'cnn3x3': modelShape[0]}, wd, **kwargs)
    # calc batch norm CONV1_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV2_TWIN 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module(
        'conv2', fireOut, prevExpandDim, {'cnn3x3': modelShape[1]}, wd,
        **kwargs)
    # calc batch norm CONV2_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling1 2x2 with stride 2
    fireOut = tf.nn.max_pool(fireOut,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='maxpool1')
    ############# CONV3_TWIN 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module(
        'conv3', fireOut, prevExpandDim, {'cnn3x3': modelShape[2]}, wd,
        **kwargs)
    # calc batch norm CONV3_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV4_TWIN 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module(
        'conv4', fireOut, prevExpandDim, {'cnn3x3': modelShape[3]}, wd,
        **kwargs)
    # calc batch norm CONV4_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling2 2x2 with stride 2
    fireOut = tf.nn.max_pool(fireOut,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='maxpool2')

    ############# AUGMENT TWIN NETWORK WITH MATCHING DATA AND AUGMENT THE OUTPUT
    corr, corrDims = model_base.twin_correlation('corr', fireOut,
                                                 prevExpandDim, 20, 2,
                                                 **kwargs)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module(
        'conv_redir', fireOut, prevExpandDim, {'cnn3x3': 32}, wd, **kwargs)
    fireOut = tf.concat([corr, fireOut], axis=3)
    prevExpandDim = (prevExpandDim // 2) + corrDims
    ############# CONV5 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv5', fireOut, prevExpandDim, {'cnn3x3': modelShape[4]}, wd,
        **kwargs)
    # calc batch norm CONV5
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV6 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv6', fireOut, prevExpandDim, {'cnn3x3': modelShape[5]}, wd,
        **kwargs)
    # calc batch norm CONV6
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### Pooling3 2x2 with stride 2
    fireOut = tf.nn.max_pool(fireOut,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='maxpool3')
    ############# CONV7 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv7', fireOut, prevExpandDim, {'cnn3x3': modelShape[6]}, wd,
        **kwargs)
    # calc batch norm CONV7
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# CONV8 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv8', fireOut, prevExpandDim, {'cnn3x3': modelShape[7]}, wd,
        **kwargs)
    # calc batch norm CONV8
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ###### DROPOUT after CONV8
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate')
                               if kwargs.get('phase') == 'train' else 1.0,
                               dtype=dtype)
        fireOut = tf.nn.dropout(fireOut, keepProb, name="dropout")
    ###### Prepare for fully connected layers
    # Reshape fireOut - flatten; each of the three 2x2 stride-2 max-pools halves both spatial dims
    prevExpandDim = (kwargs.get('imageSize') //
                     (2 * 2 * 2)) * (kwargs.get('imageSize') //
                                     (2 * 2 * 2)) * prevExpandDim
    fireOutFlat = tf.reshape(fireOut, [batchSize, -1])

    ############# FC1 layer with 1024 outputs
    fireOut, prevExpandDim = model_base.fc_fire_module('fc1', fireOutFlat,
                                                       prevExpandDim,
                                                       {'fc': modelShape[8]},
                                                       wd, **kwargs)
    # calc batch norm FC1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batch_norm', fireOut, dtype)
    ############# FC2 layer with 8 outputs
    fireOut, prevExpandDim = model_base.fc_regression_module(
        'fc2', fireOut, prevExpandDim, {'fc': kwargs.get('outputSize')}, wd,
        **kwargs)

    return fireOut
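
model_base.twin_correlation is also not shown; the call above, with its two extra parameters, suggests a FlowNet-style correlation between the twin feature maps over a bounded displacement range. A heavily simplified, hypothetical stand-in that captures the idea (not the project's actual implementation, which also takes a stride parameter):

import tensorflow as tf

def naive_correlation(featA, featB, max_disp=2):
    # For each displacement (dy, dx) within max_disp, correlate featA with a
    # shifted featB by averaging their elementwise product over channels.
    # Output has (2*max_disp + 1)**2 channels, one per displacement.
    maps = []
    for dy in range(-max_disp, max_disp + 1):
        for dx in range(-max_disp, max_disp + 1):
            shifted = tf.roll(featB, shift=[dy, dx], axis=[1, 2])
            maps.append(tf.reduce_mean(featA * shifted, axis=3))
    return tf.stack(maps, axis=3)
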