Example 1
def inference(
    images, **kwargs
):  #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]
    modelShape = kwargs.get('modelShape')
    wd = None  #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    batchSize = kwargs.get('activeBatchSize', None)

    ############# CONV1 3x3 conv, pngChannels input dims, modelShape[0] output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv1', images, kwargs.get('pngChannels'), {'cnn3x3': modelShape[0]},
        wd, **kwargs)
    # calc batch norm CONV1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm1', fireOut, dtype,
                                        kwargs.get('phase'))
    ###### Pooling1 2x2 with stride 2
    fireOut = tf.nn.max_pool(fireOut,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='maxpool1')
    ############# CONV2
    fireOut, prevExpandDim = model_base.conv_fire_module(
        'conv2', fireOut, prevExpandDim, {'cnn3x3': modelShape[1]}, wd,
        **kwargs)
    # calc batch norm CONV2
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm2', fireOut, dtype,
                                        kwargs.get('phase'))
    ###### Pooling2 2x2 with stride 2
    fireOut = tf.nn.max_pool(fireOut,
                             ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME',
                             name='maxpool2')

    #    ############# CONV3 3x3 conv, modelShape[2] output dims (filters)
    #    fireOut, prevExpandDim = model_base.conv_fire_module('conv3', fireOut, prevExpandDim,
    #                                                                  {'cnn3x3': modelShape[2]},
    #                                                                  wd, **kwargs)
    #    # calc batch norm CONV3
    #    if kwargs.get('batchNorm'):
    #        fireOut = model_base.batch_norm('batchnorm3', fireOut, dtype, kwargs.get('phase'))
    #    ############# CONV4
    #    fireOut, prevExpandDim = model_base.conv_fire_module('conv4', fireOut, prevExpandDim,
    #                                                                  {'cnn3x3': modelShape[3]},
    #                                                                  wd, **kwargs)
    #    # calc batch norm CONV4
    #    if kwargs.get('batchNorm'):
    #        fireOut = model_base.batch_norm('batchnorm4', fireOut, dtype, kwargs.get('phase'))
    #    ###### Pooling3 2x2 with stride 2
    #    pool = tf.nn.max_pool(fireOut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
    #                          padding='SAME', name='maxpool3')
    ###### DROPOUT
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate')
                               if kwargs.get('phase') == 'train' else 1.0,
                               dtype=dtype)
        fireOut = tf.nn.dropout(fireOut, keepProb, name="dropout")
    ###### Prepare for fully connected layers
    # Reshape fireOut - flatten
    prevExpandDim = int(fireOut.get_shape()[1] * fireOut.get_shape()[2] * prevExpandDim)
    fireOut = tf.reshape(fireOut, [batchSize, -1])

    ############# FC1 layer with modelShape[2] outputs
    fireOut, prevExpandDim = model_base.fc_fire_module('fc1', fireOut,
                                                       prevExpandDim,
                                                       {'fc': modelShape[2]},
                                                       wd, **kwargs)
    # calc batch norm FC1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm9', fireOut, dtype,
                                        kwargs.get('phase'))
    ############# FC2 regression layer with networkOutputSize outputs
    fireOut, prevExpandDim = model_base.fc_regression_module(
        'fc2', fireOut, prevExpandDim, {'fc': kwargs.get('networkOutputSize')},
        wd, **kwargs)

    ###### Normalize vectors to have output [0~1] for each batch
    # fireOut is [16] x [192]
    # fireOut should be [16] x [6] x [32] x [1] => now normalize for each batch and each row
    # To do so, we could rearrange everything into [16 x 6] x [32], calculate softmax for each row, and reshape back to the original layout
    # kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize') = kwargs.get('classificationModel')['binSize']
    #fireOut = tf.reshape(fireOut, [kwargs.get('activeBatchSize')*kwargs.get('logicalOutputSize'), np.int32(kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize'))])
    #fireOut.set_shape([kwargs.get('activeBatchSize')*kwargs.get('logicalOutputSize'), np.int32(kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize'))])
    #fireOut = tf.nn.softmax(fireOut)

    #### Now convert it to the correct format
    # kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize') = kwargs.get('classificationModel')['binSize']

    #fireOut = tf.reshape(fireOut, [kwargs.get('activeBatchSize'),
    #                                   kwargs.get('logicalOutputSize'),
    #                                   np.int32(kwargs.get('networkOutputSize')/(kwargs.get('logicalOutputSize'))),
    #                                   1])
    #fireOut.set_shape([kwargs.get('activeBatchSize'),
    #                       kwargs.get('logicalOutputSize'),
    #                       np.int32(kwargs.get('networkOutputSize')/(kwargs.get('logicalOutputSize'))),
    #                       1])

    return fireOut
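
The commented-out block above sketches a per-bin normalization: reshape the [batch, networkOutputSize] logits into one row per bin group, softmax each row, then restore the original layout. A minimal standalone version of that idea (a sketch, assuming TF 1.x; the helper name normalize_bins is hypothetical, and networkOutputSize = logicalOutputSize * binSize):

import tensorflow as tf

def normalize_bins(logits, batch_size, logical_output_size, bin_size):
    # [batch, logical*bin] -> [batch*logical, bin]: one row per bin group
    rows = tf.reshape(logits, [batch_size * logical_output_size, bin_size])
    # each row now sums to 1, with values in [0, 1]
    rows = tf.nn.softmax(rows)
    # restore the original [batch, logical*bin] layout
    return tf.reshape(rows, [batch_size, logical_output_size * bin_size])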
Example 2
def inference(
    images, **kwargs
):  #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]
    modelShape = kwargs.get('modelShape')
    wd = None  #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    batchSize = kwargs.get('activeBatchSize', None)

    ############# CONV1 3x3 conv, pngChannels input dims, modelShape[0] output dims (filters)
    fireOut1, prevExpandDim = model_base.conv_fire_module(
        'conv1',
        images,
        kwargs.get('pngChannels'), {'cnn3x3': modelShape[0]},
        wd,
        stride=[1, 2, 2, 1],
        **kwargs)
    ############# CONV2
    fireOut1, prevExpandDim = model_base.conv_fire_module(
        'conv2', fireOut1, prevExpandDim, {'cnn3x3': modelShape[1]}, wd,
        **kwargs)
    ############# CONV3
    fireOut2, prevExpandDim = model_base.conv_fire_module(
        'conv3',
        fireOut1,
        prevExpandDim, {'cnn3x3': modelShape[2]},
        wd,
        stride=[1, 2, 2, 1],
        **kwargs)
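    ###### Pool the conv2 output (skip branch) to the concat resolution: 8x8 with stride 8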
    fireOut1 = tf.nn.max_pool(fireOut1,
                              ksize=[1, 8, 8, 1],
                              strides=[1, 8, 8, 1],
                              padding='SAME',
                              name='maxpool1')
    ############# CONV4
    fireOut2, prevExpandDim = model_base.conv_fire_module(
        'conv4', fireOut2, prevExpandDim, {'cnn3x3': modelShape[3]}, wd,
        **kwargs)
    ############# CONV5
    fireOut3, prevExpandDim = model_base.conv_fire_module(
        'conv5',
        fireOut2,
        prevExpandDim, {'cnn3x3': modelShape[4]},
        wd,
        stride=[1, 2, 2, 1],
        **kwargs)
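    ###### Pool the conv4 output (skip branch): 4x4 with stride 4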
    fireOut2 = tf.nn.max_pool(fireOut2,
                              ksize=[1, 4, 4, 1],
                              strides=[1, 4, 4, 1],
                              padding='SAME',
                              name='maxpool2')
    ############# CONV6
    fireOut3, prevExpandDim = model_base.conv_fire_module(
        'conv6', fireOut3, prevExpandDim, {'cnn3x3': modelShape[5]}, wd,
        **kwargs)
    ############# CONV7
    fireOut4, prevExpandDim = model_base.conv_fire_module(
        'conv7',
        fireOut3,
        prevExpandDim, {'cnn3x3': modelShape[6]},
        wd,
        stride=[1, 2, 2, 1],
        **kwargs)
    ###### Pool the conv6 output (skip branch): 2x2 with stride 2
    fireOut3 = tf.nn.max_pool(fireOut3,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name='maxpool3')
    ############# CONV8
    fireOut4, prevExpandDim = model_base.conv_fire_module(
        'conv8', fireOut4, prevExpandDim, {'cnn3x3': modelShape[7]}, wd,
        **kwargs)
    # CONCAT
    fireOut1 = tf.concat([fireOut1, fireOut2, fireOut3, fireOut4], axis=3)
    prevExpandDim = int(fireOut1.get_shape()[3])
    ###### DROPOUT after CONV8
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate')
                               if kwargs.get('phase') == 'train' else 1.0,
                               dtype=dtype)
        fireOut1 = tf.nn.dropout(fireOut1, keepProb, name="dropout")
    ###### Prepare for fully connected layers
    #fireOut = tf.reshape(fireOut, [batchSize, -1])
    #prevExpandDim = int(fireOut1.get_shape()[1])
    ############## FC1 layer with 1024 outputs
    #fireOut, prevExpandDim = model_base.fc_fire_module('fc1', fireOut, prevExpandDim,
    #                                                   {'fc': modelShape[8]},
    #                                                   wd, **kwargs)
    ############# convFC: fully connected layer implemented as a convolution, modelShape[8] outputs
    fireOut1, prevExpandDim = model_base.conv_fire_inception_module(
        'convFC', fireOut1, prevExpandDim, {'cnnFC': modelShape[8]}, wd,
        **kwargs)
    # [batchsize, 1, 1, prevExpandDim]
    fireOut1 = tf.reshape(fireOut1, [batchSize, prevExpandDim])
    # calc batch norm FC1
    if kwargs.get('batchNorm'):
        fireOut1 = model_base.batch_norm('batchnorm9', fireOut1, dtype,
                                         kwargs.get('phase'))
    ############# FC2 layer with modelShape[9] outputs
    #fireOut1, prevExpandDim = model_base.fc_fire_module('fc2', fireOut1, prevExpandDim,
    #                                                   {'fc': modelShape[9]},
    #                                                   wd, **kwargs)
    ## calc batch norm FC2
    #if kwargs.get('batchNorm'):
    #    fireOut1 = model_base.batch_norm('batchnorm10', fireOut1, dtype, kwargs.get('phase'))
    ############# FC3 regression layer with networkOutputSize outputs
    fireOut1, prevExpandDim = model_base.fc_regression_module(
        'fc3', fireOut1, prevExpandDim,
        {'fc': kwargs.get('networkOutputSize')}, wd, **kwargs)
    return fireOut1
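
The four-way concat in this example only lines up because every stride-2 convolution halves the spatial resolution, and each skip branch is pooled just enough to catch up: one halving plus the 8x8/8 pool, two halvings plus 4x4/4, three halvings plus 2x2/2, and four halvings with no extra pool all land on the same height and width. A quick sanity check of that arithmetic (a sketch; the 256x256 input is an assumption, and any size divisible by 16 behaves the same):

def branch_spatial_dims(h=256, w=256):
    # conv1, conv3, conv5 and conv7 each use stride [1, 2, 2, 1]
    out1 = (h // 2 // 8, w // 2 // 8)  # conv2 output after the 8x8/8 maxpool
    out2 = (h // 4 // 4, w // 4 // 4)  # conv4 output after the 4x4/4 maxpool
    out3 = (h // 8 // 2, w // 8 // 2)  # conv6 output after the 2x2/2 maxpool
    out4 = (h // 16, w // 16)          # conv8 output, no extra pooling
    return out1, out2, out3, out4

print(branch_spatial_dims())  # ((16, 16), (16, 16), (16, 16), (16, 16)) - all four match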
Example 3
def inference(images, **kwargs): #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]

    modelShape = kwargs.get('modelShape')
    wd = None #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    batchSize = kwargs.get('activeBatchSize', None)

    ############# CONV1_TWIN 7x7 conv, imageChannels input dims, parallel twin modules, modelShape[0] output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module('conv1', images, kwargs.get('imageChannels'),
                                                                  {'cnn7x7': modelShape[0]},
                                                                  wd, **kwargs)
    # calc batch norm CONV1_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm1', fireOut, dtype, kwargs.get('phase'))
    ###### Pooling1 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool1')
    ############# CONV2_TWIN 5x5 conv, modelShape[1] output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module('conv2', pool, prevExpandDim,
                                                                  {'cnn5x5': modelShape[1]},
                                                                  wd, **kwargs)
    # calc batch norm CONV2_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm2', fireOut, dtype, kwargs.get('phase'))
    ###### Pooling2 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool2')
    ############# CONV3_TWIN 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_parallel_module('conv3', pool, prevExpandDim,
                                                                  {'cnn3x3': modelShape[2]},
                                                                  wd, **kwargs)
    # calc batch norm CONV3_TWIN
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm3', fireOut, dtype, kwargs.get('phase'))
    ###### Pooling3 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool3')
    ############# CONV4 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv4', pool, prevExpandDim,
                                                         {'cnn3x3': modelShape[4]},
                                                         wd, **kwargs)
    # calc batch norm CONV4
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm4', fireOut, dtype, kwargs.get('phase'))
    ############# CONV5 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv5', fireOut, prevExpandDim,
                                                         {'cnn3x3': modelShape[5]},
                                                         wd, **kwargs)
    # calc batch norm CONV5
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm5', fireOut, dtype, kwargs.get('phase'))
    ###### Pooling4 2x2 with stride 2
    pool = tf.nn.max_pool(fireOut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool4')
    ############# CONV6 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv6', pool, prevExpandDim,
                                                         {'cnn3x3': modelShape[6]},
                                                         wd, **kwargs)
    # calc batch norm CONV6
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm6', fireOut, dtype, kwargs.get('phase'))
    ############# CONV7 3x3 conv, 64 input dims, 64 output dims (filters)
    fireOut, prevExpandDim = model_base.conv_fire_module('conv7', fireOut, prevExpandDim,
                                                         {'cnn3x3': modelShape[7]},
                                                         wd, **kwargs)
    # calc batch norm CONV7
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm7', fireOut, dtype, kwargs.get('phase'))
    ###### DROPOUT after CONV7
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate') if kwargs.get('phase') == 'train' else 1.0, dtype=dtype)
        fireOut = tf.nn.dropout(fireOut, keepProb, name="dropout")
    ###### Prepare for fully connected layers
    # Reshape fireOut - flatten
    prevExpandDim = (kwargs.get('imageHeight')//(2*2*2*2))*(kwargs.get('imageWidth')//(2*2*2*2))*prevExpandDim
    fireOutFlat = tf.reshape(fireOut, [batchSize, -1])

    ############# FC1 layer with modelShape[8] outputs
    fireOut, prevExpandDim = model_base.fc_fire_module('fc1', fireOutFlat, prevExpandDim,
                                                       {'fc': modelShape[8]},
                                                       wd, **kwargs)
    # calc batch norm FC1
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm8', fireOut, dtype, kwargs.get('phase'))
    ############# FC2 regression layer with outputSize outputs
    fireOut, prevExpandDim = model_base.fc_regression_module('fc2', fireOut, prevExpandDim,
                                                             {'fc': kwargs.get('outputSize')},
                                                             wd, **kwargs)

    return fireOut
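
The flatten step in this example derives the fully connected input size from the image dimensions rather than from the tensor: the four SAME-padded 2x2/stride-2 max pools divide each spatial dimension by 2**4 = 16, which is what the (2*2*2*2) divisors express. A small sketch of that bookkeeping (the helper and its sample values are illustrative, not taken from the original configuration):

def flat_dim(image_height, image_width, channels):
    # four 2x2/stride-2 pools shrink each spatial dimension by a factor of 16
    return (image_height // 16) * (image_width // 16) * channels

# e.g. a 128x512 input with 64 channels after conv7:
# flat_dim(128, 512, 64) == 8 * 32 * 64 == 16384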
Example 4
def inference(images, **kwargs): #batchSize=None, phase='train', outLayer=[13,13], existingParams=[]
    modelShape = kwargs.get('modelShape')
    wd = None #0.0002
    USE_FP_16 = kwargs.get('usefp16')
    dtype = tf.float16 if USE_FP_16 else tf.float32

    batchSize = kwargs.get('activeBatchSize', None)

    ############# CONV1 3x3 conv (single filter) followed by CONV12 1x1 conv, modelShape[0] output dims (filters)
    fireOut1, prevExpandDim = model_base.conv_fire_module('conv1', images, kwargs.get('pngChannels'),
                                                                  {'cnn3x3': 1},
                                                                  wd, **kwargs)
    if kwargs.get('batchNorm'):
        fireOut1 = model_base.batch_norm('batchnorm11', fireOut1, dtype, kwargs.get('phase'))
    fireOut1, prevExpandDim = model_base.conv_fire_module('conv12', fireOut1, prevExpandDim,
                                                                  {'cnn1x1': modelShape[0]},
                                                                  wd, **kwargs)
    ## Pooling1 2x2 with stride 2
    #fireOut1 = tf.nn.max_pool(fireOut1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
    #                      padding='SAME', name='maxpool1')
    # calc batch norm CONV1
    if kwargs.get('batchNorm'):
        fireOut1 = model_base.batch_norm('batchnorm1', fireOut1, dtype, kwargs.get('phase'))
    ############# CONV2
    fireOut1, prevExpandDim = model_base.conv_fire_module('conv2', fireOut1, prevExpandDim,
                                                                  {'cnn3x3': 1},
                                                                  wd, **kwargs)
    if kwargs.get('batchNorm'):
        fireOut1 = model_base.batch_norm('batchnorm22', fireOut1, dtype, kwargs.get('phase'))
    fireOut1, prevExpandDim = model_base.conv_fire_module('conv22', fireOut1, prevExpandDim,
                                                                  {'cnn1x1': modelShape[1]},
                                                                  wd, **kwargs)
    ## Pooling2 2x2 with stride 2
    #fireOut1 = tf.nn.max_pool(fireOut1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
    #                      padding='SAME', name='maxpool2')
    # calc batch norm CONV2
    if kwargs.get('batchNorm'):
        fireOut1 = model_base.batch_norm('batchnorm2', fireOut1, dtype, kwargs.get('phase'))
    ############# CONV3
    fireOut1, prevExpandDim = model_base.conv_fire_module('conv3', fireOut1, prevExpandDim,
                                                                  {'cnn3x3': 1},
                                                                  wd, **kwargs)
    if kwargs.get('batchNorm'):
        fireOut1 = model_base.batch_norm('batchnorm23', fireOut1, dtype, kwargs.get('phase'))
    fireOut1, prevExpandDim = model_base.conv_fire_module('conv32', fireOut1, prevExpandDim,
                                                                  {'cnn1x1': modelShape[2]},
                                                                  wd, **kwargs)
    # Pooling3 2x2 with stride 2
    fireOut1 = tf.nn.max_pool(fireOut1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool3')
    # calc batch norm CONV3 (fireOut2 must exist even when batchNorm is off)
    fireOut2 = fireOut1
    if kwargs.get('batchNorm'):
        fireOut2 = model_base.batch_norm('batchnorm3', fireOut2, dtype, kwargs.get('phase'))
    ############# CONV4
    fireOut2, prevExpandDim = model_base.conv_fire_module('conv4', fireOut2, prevExpandDim,
                                                                  {'cnn3x3': 1},
                                                                  wd, **kwargs)
    if kwargs.get('batchNorm'):
        fireOut2 = model_base.batch_norm('batchnorm42', fireOut2, dtype, kwargs.get('phase'))
    fireOut2, prevExpandDim = model_base.conv_fire_module('conv42', fireOut2, prevExpandDim,
                                                                  {'cnn1x1': modelShape[3]},
                                                                  wd, **kwargs)
    ## Pooling 2x2 with stride 2
    #fireOut1 = tf.nn.max_pool(fireOut1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
    #                      padding='SAME', name='maxpool2')
    #fireOut2 = tf.nn.max_pool(fireOut2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
    #                      padding='SAME', name='maxpool4')
    # calc batch norm CONV4
    if kwargs.get('batchNorm'):
        fireOut2 = model_base.batch_norm('batchnorm4', fireOut2, dtype, kwargs.get('phase'))
    ############# CONV5
    fireOut2, prevExpandDim = model_base.conv_fire_module('conv5', fireOut2, prevExpandDim,
                                                                  {'cnn3x3': 1},
                                                         wd, **kwargs)
    # calc batch norm CONV5
    if kwargs.get('batchNorm'):
        fireOut2 = model_base.batch_norm('batchnorm52', fireOut2, dtype, kwargs.get('phase'))
    fireOut2, prevExpandDim = model_base.conv_fire_module('conv52', fireOut2, prevExpandDim,
                                                                  {'cnn1x1': modelShape[4]},
                                                         wd, **kwargs)
    ## Pooling 2x2 with stride 2
    #fireOut1 = tf.nn.max_pool(fireOut1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
    #                      padding='SAME', name='maxpool2')
    #fireOut2 = tf.nn.max_pool(fireOut2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
    #                      padding='SAME', name='maxpool5')
    # calc batch norm CONV5
    if kwargs.get('batchNorm'):
        fireOut2 = model_base.batch_norm('batchnorm5', fireOut2, dtype, kwargs.get('phase'))
    ############# CONV6
    fireOut2, prevExpandDim = model_base.conv_fire_module('conv6', fireOut2, prevExpandDim,
                                                                  {'cnn3x3': 1},
                                                         wd, **kwargs)
    if kwargs.get('batchNorm'):
        fireOut2 = model_base.batch_norm('batchnorm62', fireOut2, dtype, kwargs.get('phase'))
    fireOut2, prevExpandDim = model_base.conv_fire_module('conv62', fireOut2, prevExpandDim,
                                                                  {'cnn1x1': modelShape[5]},
                                                         wd, **kwargs)
    ###### Pooling 2x2 with stride 2 on both branches
    fireOut1 = tf.nn.max_pool(fireOut1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool4')
    fireOut2 = tf.nn.max_pool(fireOut2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool5')
    # calc batch norm CONV6 (fireOut3 must exist even when batchNorm is off)
    fireOut3 = fireOut2
    if kwargs.get('batchNorm'):
        fireOut3 = model_base.batch_norm('batchnorm6', fireOut3, dtype, kwargs.get('phase'))
    ############# CONV7
    fireOut3, prevExpandDim = model_base.conv_fire_module('conv7', fireOut3, prevExpandDim,
                                                                  {'cnn3x3': 1},
                                                         wd, **kwargs)
    if kwargs.get('batchNorm'):
        fireOut3 = model_base.batch_norm('batchnorm72', fireOut3, dtype, kwargs.get('phase'))
    fireOut3, prevExpandDim = model_base.conv_fire_module('conv72', fireOut3, prevExpandDim,
                                                                  {'cnn1x1': modelShape[6]},
                                                         wd, **kwargs)
    # calc batch norm CONV7
    if kwargs.get('batchNorm'):
        fireOut3 = model_base.batch_norm('batchnorm7', fireOut3, dtype, kwargs.get('phase'))
    ############# CONV8
    fireOut3, prevExpandDim = model_base.conv_fire_module('conv8', fireOut3, prevExpandDim,
                                                                  {'cnn3x3': 1},
                                                         wd, **kwargs)
    if kwargs.get('batchNorm'):
        fireOut3 = model_base.batch_norm('batchnorm82', fireOut3, dtype, kwargs.get('phase'))
    fireOut3, prevExpandDim = model_base.conv_fire_module('conv82', fireOut3, prevExpandDim,
                                                                  {'cnn1x1': modelShape[7]},
                                                         wd, **kwargs)
    ###### Pooling 2x2 with stride 2 on all three branches
    fireOut1 = tf.nn.max_pool(fireOut1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool6')
    fireOut2 = tf.nn.max_pool(fireOut2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool7')
    fireOut3 = tf.nn.max_pool(fireOut3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name='maxpool8')
    # calc batch norm CONV8
    if kwargs.get('batchNorm'):
        fireOut3 = model_base.batch_norm('batchnorm8', fireOut3, dtype, kwargs.get('phase'))
    # CONCAT
    fireOut = tf.concat([fireOut1, fireOut2, fireOut3], axis=3)
    prevExpandDim = int(fireOut.get_shape()[3])
    ############# convFC1: fully connected layer implemented as a convolution, modelShape[7] outputs
    fireOut, prevExpandDim = model_base.conv_fire_module('convFC1', fireOut, prevExpandDim,
                                                             {'cnnFC': modelShape[7]},
                                                             wd, **kwargs)
    fireOut = tf.reshape(fireOut, [batchSize, modelShape[7]])
    prevExpandDim = int(fireOut.get_shape()[1])
    ############# FC0 layer with modelShape[8] outputs
    fireOut, prevExpandDim = model_base.fc_fire_module('fc0', fireOut, prevExpandDim,
                                                             {'fc': modelShape[8]},
                                                             wd, **kwargs)
    if kwargs.get('batchNorm'):
        fireOut = model_base.batch_norm('batchnorm9', fireOut, dtype, kwargs.get('phase'))   
    ###### DROPOUT after FC0
    with tf.name_scope("drop"):
        keepProb = tf.constant(kwargs.get('dropOutKeepRate') if kwargs.get('phase') == 'train' else 1.0, dtype=dtype)
        fireOut = tf.nn.dropout(fireOut, keepProb, name="dropout")
    ############# FC1 regression layer with networkOutputSize outputs
    fireOut, prevExpandDim = model_base.fc_regression_module('fc1', fireOut, prevExpandDim,
                                                             {'fc': kwargs.get('networkOutputSize')},
                                                             wd, **kwargs)
    ###### Normalize vectors to have output [0~1] for each batch
    # fireOut is [16] x [192]
    # fireOut should be [16] x [6] x [32] x [1] => now normalize for each batch and each row
    # To do so, we could rearrange everything into [16 x 6] x [32], calculate softmax for each row, and reshape back to the original layout
    # kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize') = kwargs.get('classificationModel')['binSize']
    #fireOut = tf.reshape(fireOut, [kwargs.get('activeBatchSize')*kwargs.get('logicalOutputSize'), np.int32(kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize'))])
    #fireOut.set_shape([kwargs.get('activeBatchSize')*kwargs.get('logicalOutputSize'), np.int32(kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize'))])
    #fireOut = tf.nn.softmax(fireOut)

    #### Now convert it to the correct format
    # kwargs.get('networkOutputSize')/kwargs.get('logicalOutputSize') = kwargs.get('classificationModel')['binSize']
    
    #fireOut = tf.reshape(fireOut, [kwargs.get('activeBatchSize'), 
    #                                   kwargs.get('logicalOutputSize'), 
    #                                   np.int32(kwargs.get('networkOutputSize')/(kwargs.get('logicalOutputSize'))), 
    #                                   1])
    #fireOut.set_shape([kwargs.get('activeBatchSize'), 
    #                       kwargs.get('logicalOutputSize'), 
    #                       np.int32(kwargs.get('networkOutputSize')/(kwargs.get('logicalOutputSize'))), 
    #                       1])

    return fireOut
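
All four examples read their configuration through kwargs rather than positional arguments. A hypothetical call site, to show the keys these graphs expect (a sketch, assuming TF 1.x; every value below is a placeholder assumption, not taken from the original setup):

import tensorflow as tf

images = tf.placeholder(tf.float32, [16, 128, 512, 2])  # NHWC input batch
logits = inference(
    images,
    modelShape=[64, 64, 128, 128, 256, 256, 512, 512, 1024],
    usefp16=False,
    activeBatchSize=16,      # must match the placeholder batch dimension
    pngChannels=2,           # must match the placeholder channel dimension
    batchNorm=True,
    phase='train',
    dropOutKeepRate=0.5,
    networkOutputSize=192,
)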