Example #1
def CAM_net2D(x, nlabels, training, scope_reuse=False):

    with tf.variable_scope('classifier') as scope:

        if scope_reuse:
            scope.reuse_variables()

        init_filters = 32

        conv1_1 = layers.conv2D_layer_bn(x, 'conv1_1', num_filters=init_filters, training=training)

        pool1 = layers.maxpool2D_layer(conv1_1)

        conv2_1 = layers.conv2D_layer_bn(pool1, 'conv2_1', num_filters=init_filters*2, training=training)

        pool2 = layers.maxpool2D_layer(conv2_1)

        conv3_1 = layers.conv2D_layer_bn(pool2, 'conv3_1', num_filters=init_filters*4, training=training)
        conv3_2 = layers.conv2D_layer_bn(conv3_1, 'conv3_2', num_filters=init_filters*4, training=training)

        conv4_1 = layers.conv2D_layer_bn(conv3_2, 'conv4_1', num_filters=init_filters*8, training=training)
        conv4_2 = layers.conv2D_layer_bn(conv4_1, 'conv4_2', num_filters=init_filters*8, training=training)

        conv5_1 = layers.conv2D_layer_bn(conv4_2, 'conv5_1', num_filters=init_filters*16, training=training)
        conv5_2 = layers.conv2D_layer_bn(conv5_1, 'conv5_2', num_filters=init_filters*16, training=training)

        convD_1 = layers.conv2D_layer_bn(conv5_2, 'feature_maps', num_filters=init_filters*16, training=training)

        fm_averages = layers.averagepool2D_layer(convD_1, name='fm_averages')

        logits = layers.dense_layer(fm_averages, 'weight_layer', hidden_units=nlabels, activation=tf.identity, add_bias=False)

    return logits
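
This is a class-activation-mapping (CAM) classifier: the 'feature_maps' activations are globally average-pooled and fed to a bias-free dense layer, so a per-class activation map can be recovered by weighting the feature maps with the dense kernel. A minimal sketch of that weighting step, assuming TensorFlow 1.x imported as tf; the variable name 'W' is a guess that depends on layers.dense_layer internals, and the 'feature_maps' tensor itself is not returned by the function above.

import tensorflow as tf

def compute_cams(feature_maps, class_weights):
    # feature_maps: [batch, H, W, C], class_weights: [C, nlabels]
    # Per-class weighted sum of the feature maps -> [batch, H, W, nlabels]
    return tf.einsum('bhwc,cn->bhwn', feature_maps, class_weights)

with tf.variable_scope('classifier/weight_layer', reuse=True):
    w = tf.get_variable('W')  # hypothetical variable name inside dense_layer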
Example #2
def normalnet_deeper2D(x, nlabels, training, scope_reuse=False):

    with tf.variable_scope('classifier') as scope:

        if scope_reuse:
            scope.reuse_variables()

        init_filters = 32

        conv1_1 = layers.conv2D_layer_bn(x, 'conv1_1', num_filters=init_filters, training=training)

        pool1 = layers.maxpool2D_layer(conv1_1)

        conv2_1 = layers.conv2D_layer_bn(pool1, 'conv2_1', num_filters=init_filters*2, training=training)

        pool2 = layers.maxpool2D_layer(conv2_1)

        conv3_1 = layers.conv2D_layer_bn(pool2, 'conv3_1', num_filters=init_filters*4, training=training)
        conv3_2 = layers.conv2D_layer_bn(conv3_1, 'conv3_2', num_filters=init_filters*4, training=training)

        pool3 = layers.maxpool2D_layer(conv3_2)

        conv4_1 = layers.conv2D_layer_bn(pool3, 'conv4_1', num_filters=init_filters*8, training=training)
        conv4_2 = layers.conv2D_layer_bn(conv4_1, 'conv4_2', num_filters=init_filters*8, training=training)

        pool4 = layers.maxpool2D_layer(conv4_2)

        conv5_1 = layers.conv2D_layer_bn(pool4, 'conv5_1', num_filters=init_filters*16, training=training)
        conv5_2 = layers.conv2D_layer_bn(conv5_1, 'conv5_2', num_filters=init_filters*16, training=training)

        pool5 = layers.maxpool2D_layer(conv5_2)

        conv6_1 = layers.conv2D_layer_bn(pool5, 'conv6_1', num_filters=init_filters*16, training=training)
        conv6_2 = layers.conv2D_layer_bn(conv6_1, 'conv6_2', num_filters=init_filters*16, training=training)

        dense1 = layers.dense_layer_bn(conv6_2, 'dense1', hidden_units=init_filters*16, training=training)

        logits = layers.dense_layer_bn(dense1, 'dense2', hidden_units=nlabels, training=training, activation=tf.identity)


    return logits
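
A minimal sketch of attaching a classification loss to these logits, assuming TensorFlow 1.x imported as tf and integer class labels; the placeholder shapes are assumptions, not taken from the listing.

import tensorflow as tf

x_pl = tf.placeholder(tf.float32, [None, 224, 224, 1])   # assumed input shape
labels_pl = tf.placeholder(tf.int32, [None])
training_pl = tf.placeholder(tf.bool)

logits = normalnet_deeper2D(x_pl, nlabels=2, training=training_pl)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pl, logits=logits))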
Example #3
def rebuttalnet2D(x, nlabels, training, scope_reuse=False):

    with tf.variable_scope('classifier') as scope:

        if scope_reuse:
            scope.reuse_variables()

        init_filters = 32

        conv1_1 = layers.conv2D_layer_bn(x, 'conv1_1', num_filters=init_filters, training=training)

        pool1 = layers.maxpool2D_layer(conv1_1)

        conv2_1 = layers.conv2D_layer_bn(pool1, 'conv2_1', num_filters=init_filters*2, training=training)

        pool2 = layers.maxpool2D_layer(conv2_1)

        conv3_1 = layers.conv2D_layer_bn(pool2, 'conv3_1', num_filters=init_filters*4, training=training)
        conv3_2 = layers.conv2D_layer_bn(conv3_1, 'conv3_2', num_filters=init_filters*4, training=training)

        pool3 = layers.maxpool2D_layer(conv3_2)

        conv4_1 = layers.conv2D_layer_bn(pool3, 'conv4_1', num_filters=init_filters*8, training=training)
        conv4_2 = layers.conv2D_layer_bn(conv4_1, 'conv4_2', num_filters=init_filters*8, training=training)

        pool4 = layers.maxpool2D_layer(conv4_2)

        conv5_1 = layers.conv2D_layer_bn(pool4, 'conv5_1', num_filters=init_filters*16, training=training)
        conv5_2 = layers.conv2D_layer_bn(conv5_1, 'conv5_2', num_filters=init_filters*16, training=training)

        convD_1 = layers.conv2D_layer_bn(conv5_2, 'convD_1', num_filters=init_filters*16, training=training)

        avg_pool = layers.averagepool2D_layer(convD_1, name='avg_pool')

        logits = layers.dense_layer_bn(avg_pool, 'dense2', hidden_units=nlabels, training=training, activation=tf.identity)


    return logits
Example #4
def C3D_fcn_16_2D_bn(x, training, scope_name='critic', scope_reuse=False):

    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        conv1_1 = layers.conv2D_layer_bn(x, 'conv1_1', num_filters=16, training=training)

        pool1 = layers.maxpool2D_layer(conv1_1)

        conv2_1 = layers.conv2D_layer_bn(pool1, 'conv2_1', num_filters=32, training=training)

        pool2 = layers.maxpool2D_layer(conv2_1)

        conv3_1 = layers.conv2D_layer_bn(pool2, 'conv3_1', num_filters=64, training=training)
        conv3_2 = layers.conv2D_layer_bn(conv3_1, 'conv3_2', num_filters=64, training=training)

        pool3 = layers.maxpool2D_layer(conv3_2)

        conv4_1 = layers.conv2D_layer_bn(pool3, 'conv4_1', num_filters=128, training=training)
        conv4_2 = layers.conv2D_layer_bn(conv4_1, 'conv4_2', num_filters=128, training=training)

        pool4 = layers.maxpool2D_layer(conv4_2)

        conv5_1 = layers.conv2D_layer_bn(pool4, 'conv5_1', num_filters=256, training=training)
        conv5_2 = layers.conv2D_layer_bn(conv5_1, 'conv5_2', num_filters=256, training=training)

        convD_1 = layers.conv2D_layer_bn(conv5_2, 'convD_1', num_filters=256, training=training)
        convD_2 = layers.conv2D_layer(convD_1,
                                      'convD_2',
                                      num_filters=1,
                                      kernel_size=(1, 1),
                                      activation=tf.identity)

        logits = layers.averagepool2D_layer(convD_2, name='diagnosis_avg')


    return logits
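
An illustrative use of the scope_reuse flag: building the critic twice so that both calls share one set of variables. The placeholder shapes are assumptions.

import tensorflow as tf

x_a = tf.placeholder(tf.float32, [None, 128, 128, 1])
x_b = tf.placeholder(tf.float32, [None, 128, 128, 1])
training_pl = tf.placeholder(tf.bool)

out_a = C3D_fcn_16_2D_bn(x_a, training_pl)                    # creates the variables
out_b = C3D_fcn_16_2D_bn(x_b, training_pl, scope_reuse=True)  # reuses them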
Example #5
def forward(images, training, nlabels):

    images_padded = tf.pad(images, [[0, 0], [92, 92], [92, 92], [0, 0]],
                           'CONSTANT')

    conv1_1 = layers.conv2D_layer_bn(images_padded,
                                     'conv1_1',
                                     num_filters=64,
                                     training=training,
                                     padding='VALID')
    conv1_2 = layers.conv2D_layer_bn(conv1_1,
                                     'conv1_2',
                                     num_filters=64,
                                     training=training,
                                     padding='VALID')

    pool1 = layers.max_pool_layer2d(conv1_2, 'pool_1')

    conv2_1 = layers.conv2D_layer_bn(pool1,
                                     'conv2_1',
                                     num_filters=128,
                                     training=training,
                                     padding='VALID')
    conv2_2 = layers.conv2D_layer_bn(conv2_1,
                                     'conv2_2',
                                     num_filters=128,
                                     training=training,
                                     padding='VALID')

    pool2 = layers.max_pool_layer2d(conv2_2, 'pool_2')

    conv3_1 = layers.conv2D_layer_bn(pool2,
                                     'conv3_1',
                                     num_filters=256,
                                     training=training,
                                     padding='VALID')
    conv3_2 = layers.conv2D_layer_bn(conv3_1,
                                     'conv3_2',
                                     num_filters=256,
                                     training=training,
                                     padding='VALID')

    pool3 = layers.max_pool_layer2d(conv3_2, 'pool_3')

    conv4_1 = layers.conv2D_layer_bn(pool3,
                                     'conv4_1',
                                     num_filters=512,
                                     training=training,
                                     padding='VALID')
    conv4_2 = layers.conv2D_layer_bn(conv4_1,
                                     'conv4_2',
                                     num_filters=512,
                                     training=training,
                                     padding='VALID')

    pool4 = layers.max_pool_layer2d(conv4_2, 'pool_4')

    conv5_1 = layers.conv2D_layer_bn(pool4,
                                     'conv5_1',
                                     num_filters=1024,
                                     training=training,
                                     padding='VALID')
    conv5_2 = layers.conv2D_layer_bn(conv5_1,
                                     'conv5_2',
                                     num_filters=1024,
                                     training=training,
                                     padding='VALID')

    upconv4 = layers.deconv2D_layer_bn(conv5_2,
                                       name='upconv4',
                                       kernel_size=(4, 4),
                                       strides=(2, 2),
                                       num_filters=nlabels,
                                       training=training)
    concat4 = layers.crop_and_concat_layer([upconv4, conv4_2],
                                           'crop_concat_4',
                                           axis=3)

    conv6_1 = layers.conv2D_layer_bn(concat4,
                                     'conv6_1',
                                     num_filters=512,
                                     training=training,
                                     padding='VALID')
    conv6_2 = layers.conv2D_layer_bn(conv6_1,
                                     'conv6_2',
                                     num_filters=512,
                                     training=training,
                                     padding='VALID')

    upconv3 = layers.deconv2D_layer_bn(conv6_2,
                                       name='upconv3',
                                       kernel_size=(4, 4),
                                       strides=(2, 2),
                                       num_filters=nlabels,
                                       training=training)

    concat3 = layers.crop_and_concat_layer([upconv3, conv3_2],
                                           'crop_concat_3',
                                           axis=3)

    conv7_1 = layers.conv2D_layer_bn(concat3,
                                     'conv7_1',
                                     num_filters=256,
                                     training=training,
                                     padding='VALID')
    conv7_2 = layers.conv2D_layer_bn(conv7_1,
                                     'conv7_2',
                                     num_filters=256,
                                     training=training,
                                     padding='VALID')

    upconv2 = layers.deconv2D_layer_bn(conv7_2,
                                       name='upconv2',
                                       kernel_size=(4, 4),
                                       strides=(2, 2),
                                       num_filters=nlabels,
                                       training=training)
    concat2 = layers.crop_and_concat_layer([upconv2, conv2_2],
                                           'crop_concat_2',
                                           axis=3)

    conv8_1 = layers.conv2D_layer_bn(concat2,
                                     'conv8_1',
                                     num_filters=128,
                                     training=training,
                                     padding='VALID')
    conv8_2 = layers.conv2D_layer_bn(conv8_1,
                                     'conv8_2',
                                     num_filters=128,
                                     training=training,
                                     padding='VALID')

    upconv1 = layers.deconv2D_layer_bn(conv8_2,
                                       name='upconv1',
                                       kernel_size=(4, 4),
                                       strides=(2, 2),
                                       num_filters=nlabels,
                                       training=training)
    concat1 = layers.crop_and_concat_layer([upconv1, conv1_2],
                                           'crop_concat_1',
                                           axis=3)

    conv9_1 = layers.conv2D_layer_bn(concat1,
                                     'conv9_1',
                                     num_filters=64,
                                     training=training,
                                     padding='VALID')
    conv9_2 = layers.conv2D_layer_bn(conv9_1,
                                     'conv9_2',
                                     num_filters=64,
                                     training=training,
                                     padding='VALID')

    pred = layers.conv2D_layer_bn(conv9_2,
                                  'pred',
                                  num_filters=nlabels,
                                  kernel_size=(1, 1),
                                  activation=tf.identity,
                                  training=training,
                                  padding='VALID')

    return pred
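
The [92, 92] padding matches the border loss of a U-Net built from 3x3 VALID convolutions (the kernel size is assumed to be the conv2D_layer_bn default). A small sanity check, also assuming each transposed convolution exactly doubles the spatial size:

# Each 3x3 VALID conv trims 1 pixel per side at its own resolution; a trim at
# pyramid level L costs 2**L pixels per side at full resolution.
encoder_convs = {0: 2, 1: 2, 2: 2, 3: 2, 4: 2}   # conv1_* ... conv5_* (bottleneck)
decoder_convs = {3: 2, 2: 2, 1: 2, 0: 2}         # conv6_* ... conv9_*

border = sum(n * 2 ** lvl for lvl, n in encoder_convs.items()) \
       + sum(n * 2 ** lvl for lvl, n in decoder_convs.items())
print(border)  # 92, i.e. the padding above restores the original input size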
Example #6
def forward(images, training, nlabels):

    conv1_1 = layers.conv2D_layer_bn(images,
                                     'conv1_1',
                                     num_filters=64,
                                     training=training,
                                     padding='SAME')
    conv1_2 = layers.conv2D_layer_bn(conv1_1,
                                     'conv1_2',
                                     num_filters=64,
                                     training=training,
                                     padding='SAME')

    pool1 = layers.max_pool_layer2d(conv1_2, 'pool_1')

    conv2_1 = layers.conv2D_layer_bn(pool1,
                                     'conv2_1',
                                     num_filters=128,
                                     training=training,
                                     padding='SAME')
    conv2_2 = layers.conv2D_layer_bn(conv2_1,
                                     'conv2_2',
                                     num_filters=128,
                                     training=training,
                                     padding='SAME')

    pool2 = layers.max_pool_layer2d(conv2_2, 'pool_2')
    dout2 = layers.dropout_layer(pool2, 'dropout_2', training)

    conv3_1 = layers.conv2D_layer_bn(dout2,
                                     'conv3_1',
                                     num_filters=256,
                                     training=training,
                                     padding='SAME')
    conv3_2 = layers.conv2D_layer_bn(conv3_1,
                                     'conv3_2',
                                     num_filters=256,
                                     training=training,
                                     padding='SAME')

    pool3 = layers.max_pool_layer2d(conv3_2, 'pool_3')
    dout3 = layers.dropout_layer(pool3, 'dropout_3', training)

    conv4_1 = layers.conv2D_layer_bn(dout3,
                                     'conv4_1',
                                     num_filters=512,
                                     training=training,
                                     padding='SAME')
    conv4_2 = layers.conv2D_layer_bn(conv4_1,
                                     'conv4_2',
                                     num_filters=512,
                                     training=training,
                                     padding='SAME')

    pool4 = layers.max_pool_layer2d(conv4_2, 'pool_4')
    dout4 = layers.dropout_layer(pool4, 'dropout_4', training)

    conv5_1 = layers.conv2D_layer_bn(dout4,
                                     'conv5_1',
                                     num_filters=1024,
                                     training=training,
                                     padding='SAME')
    conv5_2 = layers.conv2D_layer_bn(conv5_1,
                                     'conv5_2',
                                     num_filters=1024,
                                     training=training,
                                     padding='SAME')

    upconv4 = layers.deconv2D_layer_bn(conv5_2,
                                       name='upconv4',
                                       kernel_size=(4, 4),
                                       strides=(2, 2),
                                       num_filters=nlabels,
                                       training=training)
    concat4 = layers.crop_and_concat_layer([upconv4, conv4_2],
                                           'crop_concat_4',
                                           axis=3)
    dout5 = layers.dropout_layer(concat4, 'dropout_5', training)

    conv6_1 = layers.conv2D_layer_bn(dout5,
                                     'conv6_1',
                                     num_filters=512,
                                     training=training,
                                     padding='SAME')
    conv6_2 = layers.conv2D_layer_bn(conv6_1,
                                     'conv6_2',
                                     num_filters=512,
                                     training=training,
                                     padding='SAME')

    upconv3 = layers.deconv2D_layer_bn(conv6_2,
                                       name='upconv3',
                                       kernel_size=(4, 4),
                                       strides=(2, 2),
                                       num_filters=nlabels,
                                       training=training)
    concat3 = layers.crop_and_concat_layer([upconv3, conv3_2],
                                           'crop_concat_3',
                                           axis=3)
    dout6 = layers.dropout_layer(concat3, 'dropout_6', training)

    conv7_1 = layers.conv2D_layer_bn(dout6,
                                     'conv7_1',
                                     num_filters=256,
                                     training=training,
                                     padding='SAME')
    conv7_2 = layers.conv2D_layer_bn(conv7_1,
                                     'conv7_2',
                                     num_filters=256,
                                     training=training,
                                     padding='SAME')

    upconv2 = layers.deconv2D_layer_bn(conv7_2,
                                       name='upconv2',
                                       kernel_size=(4, 4),
                                       strides=(2, 2),
                                       num_filters=nlabels,
                                       training=training)
    concat2 = layers.crop_and_concat_layer([upconv2, conv2_2],
                                           'crop_concat_2',
                                           axis=3)
    dout7 = layers.dropout_layer(concat2, 'dropout_7', training)

    conv8_1 = layers.conv2D_layer_bn(dout7,
                                     'conv8_1',
                                     num_filters=128,
                                     training=training,
                                     padding='SAME')
    conv8_2 = layers.conv2D_layer_bn(conv8_1,
                                     'conv8_2',
                                     num_filters=128,
                                     training=training,
                                     padding='SAME')

    upconv1 = layers.deconv2D_layer_bn(conv8_2,
                                       name='upconv1',
                                       kernel_size=(4, 4),
                                       strides=(2, 2),
                                       num_filters=nlabels,
                                       training=training)
    concat1 = layers.crop_and_concat_layer([upconv1, conv1_2],
                                           'crop_concat_1',
                                           axis=3)

    conv9_1 = layers.conv2D_layer_bn(concat1,
                                     'conv9_1',
                                     num_filters=64,
                                     training=training,
                                     padding='SAME')
    conv9_2 = layers.conv2D_layer_bn(conv9_1,
                                     'conv9_2',
                                     num_filters=64,
                                     training=training,
                                     padding='SAME')

    pred_1 = layers.conv2D_layer_bn(conv9_2,
                                    'pred',
                                    num_filters=nlabels,
                                    kernel_size=(1, 1),
                                    activation=tf.identity,
                                    training=training,
                                    padding='SAME')

    # Deep supervision
    ds1_1 = layers.conv2D_layer(conv7_2,
                                'ds_1',
                                num_filters=nlabels,
                                kernel_size=(1, 1),
                                activation=tf.identity,
                                padding='SAME')
    ds1_2 = layers.deconv2D_layer(ds1_1,
                                  'ds_2',
                                  kernel_size=(4, 4),
                                  strides=(2, 2),
                                  num_filters=nlabels,
                                  padding='SAME')
    ds2_1 = layers.conv2D_layer(conv8_2,
                                'ds_3',
                                num_filters=nlabels,
                                kernel_size=(1, 1),
                                activation=tf.identity,
                                padding='SAME')
    ds1_ds2 = tf.add(ds1_2, ds2_1)
    ds = layers.deconv2D_layer(ds1_ds2,
                               'ds_4',
                               kernel_size=(4, 4),
                               strides=(2, 2),
                               num_filters=nlabels,
                               padding='SAME')

    pred_2 = tf.add(pred_1, ds)

    return pred_2
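
The deep-supervision branches are folded into the output by summation, so the return value is an ordinary logit map; a hard segmentation could be obtained as in the sketch below (TensorFlow 1.x assumed, placeholder shapes are assumptions).

import tensorflow as tf

images_pl = tf.placeholder(tf.float32, [None, 256, 256, 1])
training_pl = tf.placeholder(tf.bool)

logits = forward(images_pl, training_pl, nlabels=4)   # [batch, H, W, nlabels]
mask = tf.argmax(tf.nn.softmax(logits), axis=-1)      # [batch, H, W] hard labels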
Example #7
def unet_16_2D_bn(x, training, scope_name='generator'):

    n_ch_0 = 16

    with tf.variable_scope(scope_name):

        conv1_1 = layers.conv2D_layer_bn(x,
                                         'conv1_1',
                                         num_filters=n_ch_0,
                                         training=training)
        conv1_2 = layers.conv2D_layer_bn(conv1_1,
                                         'conv1_2',
                                         num_filters=n_ch_0,
                                         training=training)
        pool1 = layers.maxpool2D_layer(conv1_2)

        conv2_1 = layers.conv2D_layer_bn(pool1,
                                         'conv2_1',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        conv2_2 = layers.conv2D_layer_bn(conv2_1,
                                         'conv2_2',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        pool2 = layers.maxpool2D_layer(conv2_2)

        conv3_1 = layers.conv2D_layer_bn(pool2,
                                         'conv3_1',
                                         num_filters=n_ch_0 * 4,
                                         training=training)
        conv3_2 = layers.conv2D_layer_bn(conv3_1,
                                         'conv3_2',
                                         num_filters=n_ch_0 * 4,
                                         training=training)
        pool3 = layers.maxpool2D_layer(conv3_2)

        conv4_1 = layers.conv2D_layer_bn(pool3,
                                         'conv4_1',
                                         num_filters=n_ch_0 * 8,
                                         training=training)
        conv4_2 = layers.conv2D_layer_bn(conv4_1,
                                         'conv4_2',
                                         num_filters=n_ch_0 * 8,
                                         training=training)

        upconv3 = layers.deconv2D_layer_bn(conv4_2,
                                           name='upconv3',
                                           num_filters=n_ch_0,
                                           training=training)
        concat3 = layers.crop_and_concat_layer([upconv3, conv3_2], axis=-1)

        conv5_1 = layers.conv2D_layer_bn(concat3,
                                         'conv5_1',
                                         num_filters=n_ch_0 * 4,
                                         training=training)

        conv5_2 = layers.conv2D_layer_bn(conv5_1,
                                         'conv5_2',
                                         num_filters=n_ch_0 * 4,
                                         training=training)

        upconv2 = layers.deconv2D_layer_bn(conv5_2,
                                           name='upconv2',
                                           num_filters=n_ch_0,
                                           training=training)
        concat2 = layers.crop_and_concat_layer([upconv2, conv2_2], axis=-1)

        conv6_1 = layers.conv2D_layer_bn(concat2,
                                         'conv6_1',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        conv6_2 = layers.conv2D_layer_bn(conv6_1,
                                         'conv6_2',
                                         num_filters=n_ch_0 * 2,
                                         training=training)

        upconv1 = layers.deconv2D_layer_bn(conv6_2,
                                           name='upconv1',
                                           num_filters=n_ch_0,
                                           training=training)
        concat1 = layers.crop_and_concat_layer([upconv1, conv1_2], axis=-1)

        conv8_1 = layers.conv2D_layer_bn(concat1,
                                         'conv8_1',
                                         num_filters=n_ch_0,
                                         training=training)
        conv8_2 = layers.conv2D_layer(conv8_1,
                                      'conv8_2',
                                      num_filters=1,
                                      activation=tf.identity)

    return conv8_2
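
Because the whole network lives under scope_name, its variables can be selected for a separate optimizer, e.g. when this generator is trained against a critic such as the one in Example #4. A sketch with assumed placeholder shapes:

import tensorflow as tf

x_pl = tf.placeholder(tf.float32, [None, 256, 256, 1])
training_pl = tf.placeholder(tf.bool)

gen_out = unet_16_2D_bn(x_pl, training_pl, scope_name='generator')
gen_vars = [v for v in tf.trainable_variables() if v.name.startswith('generator/')]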
Example #8
def unet2D_i2l(images,
               nlabels,
               training_pl,
               scope_reuse=False):

    n0 = 16
    n1, n2, n3, n4 = 1*n0, 2*n0, 4*n0, 8*n0
    
    with tf.variable_scope('i2l_mapper') as scope:
        
        if scope_reuse:
            scope.reuse_variables()
        
        # ====================================
        # 1st Conv block - two conv layers, followed by max-pooling
        # ====================================
        conv1_1 = layers.conv2D_layer_bn(x=images, name='conv1_1', num_filters=n1, training = training_pl)
        conv1_2 = layers.conv2D_layer_bn(x=conv1_1, name='conv1_2', num_filters=n1, training = training_pl)
        pool1 = layers.max_pool_layer2d(conv1_2)
    
        # ====================================
        # 2nd Conv block
        # ====================================
        conv2_1 = layers.conv2D_layer_bn(x=pool1, name='conv2_1', num_filters=n2, training = training_pl)
        conv2_2 = layers.conv2D_layer_bn(x=conv2_1, name='conv2_2', num_filters=n2, training = training_pl)
        pool2 = layers.max_pool_layer2d(conv2_2)
    
        # ====================================
        # 3rd Conv block
        # ====================================
        conv3_1 = layers.conv2D_layer_bn(x=pool2, name='conv3_1', num_filters=n3, training = training_pl)
        conv3_2 = layers.conv2D_layer_bn(x=conv3_1, name='conv3_2', num_filters=n3, training = training_pl)
        pool3 = layers.max_pool_layer2d(conv3_2)
    
        # ====================================
        # 4th Conv block
        # ====================================
        conv4_1 = layers.conv2D_layer_bn(x=pool3, name='conv4_1', num_filters=n4, training = training_pl)
        conv4_2 = layers.conv2D_layer_bn(x=conv4_1, name='conv4_2', num_filters=n4, training = training_pl)
    
        # ====================================
        # Upsampling via bilinear upsampling, concatenation (skip connection), followed by 2 conv layers
        # ====================================
        deconv3 = layers.bilinear_upsample2D(conv4_2, size = (tf.shape(conv3_2)[1],tf.shape(conv3_2)[2]), name='upconv3')
        concat3 = tf.concat([deconv3, conv3_2], axis=-1)        
        conv5_1 = layers.conv2D_layer_bn(x=concat3, name='conv5_1', num_filters=n3, training = training_pl)
        conv5_2 = layers.conv2D_layer_bn(x=conv5_1, name='conv5_2', num_filters=n3, training = training_pl)
    
        # ====================================
        # Upsampling via bilinear upsampling, concatenation (skip connection), followed by 2 conv layers
        # ====================================
        deconv2 = layers.bilinear_upsample2D(conv5_2, size = (tf.shape(conv2_2)[1],tf.shape(conv2_2)[2]), name='upconv2')
        concat2 = tf.concat([deconv2, conv2_2], axis=-1)        
        conv6_1 = layers.conv2D_layer_bn(x=concat2, name='conv6_1', num_filters=n2, training = training_pl)
        conv6_2 = layers.conv2D_layer_bn(x=conv6_1, name='conv6_2', num_filters=n2, training = training_pl)
    
        # ====================================
        # Upsampling via bilinear upsampling, concatenation (skip connection), followed by 2 conv layers
        # ====================================
        deconv1 = layers.bilinear_upsample2D(conv6_2, size = (tf.shape(conv1_2)[1],tf.shape(conv1_2)[2]), name='upconv1')
        concat1 = tf.concat([deconv1, conv1_2], axis=-1)        
        conv7_1 = layers.conv2D_layer_bn(x=concat1, name='conv7_1', num_filters=n1, training = training_pl)
        conv7_2 = layers.conv2D_layer_bn(x=conv7_1, name='conv7_2', num_filters=n1, training = training_pl)
    
        # ====================================
        # Final conv layer - without batch normalization or activation
        # ====================================
        pred = layers.conv2D_layer(x=conv7_2, name='pred', num_filters=nlabels, kernel_size=1, activation=tf.identity)
        
    return pool1, pool2, pool3, conv4_2, conv5_2, conv6_2, conv7_2, pred
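
Unlike the previous examples, the decoder here upsamples with layers.bilinear_upsample2D rather than transposed convolutions. That helper is not shown in the listing; a plausible TensorFlow 1.x implementation (an assumption about its internals, not the original code) could look like:

import tensorflow as tf

def bilinear_upsample2D(x, size, name):
    # size: (new_height, new_width), e.g. built from tf.shape(skip)[1:3]
    with tf.name_scope(name):
        return tf.image.resize_bilinear(x, size)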
Example #9
def VGG16_FCN_8_bn(images, training, nlabels):

    conv1_1 = layers.conv2D_layer_bn(images, 'conv1_1', num_filters=64, training=training)
    conv1_2 = layers.conv2D_layer_bn(conv1_1, 'conv1_2', num_filters=64, training=training)

    pool1 = layers.max_pool_layer2d(conv1_2)

    conv2_1 = layers.conv2D_layer_bn(pool1, 'conv2_1', num_filters=128, training=training)
    conv2_2 = layers.conv2D_layer_bn(conv2_1, 'conv2_2', num_filters=128, training=training)

    pool2 = layers.max_pool_layer2d(conv2_2)

    conv3_1 = layers.conv2D_layer_bn(pool2, 'conv3_1', num_filters=256, training=training)
    conv3_2 = layers.conv2D_layer_bn(conv3_1, 'conv3_2', num_filters=256, training=training)
    conv3_3 = layers.conv2D_layer_bn(conv3_2, 'conv3_3', num_filters=256, training=training)

    pool3 = layers.max_pool_layer2d(conv3_3)

    conv4_1 = layers.conv2D_layer_bn(pool3, 'conv4_1', num_filters=512, training=training)
    conv4_2 = layers.conv2D_layer_bn(conv4_1, 'conv4_2', num_filters=512, training=training)
    conv4_3 = layers.conv2D_layer_bn(conv4_2, 'conv4_3', num_filters=512, training=training)

    pool4 = layers.max_pool_layer2d(conv4_3)

    conv5_1 = layers.conv2D_layer_bn(pool4, 'conv5_1', num_filters=512, training=training)
    conv5_2 = layers.conv2D_layer_bn(conv5_1, 'conv5_2', num_filters=512, training=training)
    conv5_3 = layers.conv2D_layer_bn(conv5_2, 'conv5_3', num_filters=512, training=training)

    pool5 = layers.max_pool_layer2d(conv5_3)

    conv6 = layers.conv2D_layer_bn(pool5, 'conv6', num_filters=4096, kernel_size=(7,7), training=training)
    conv7 = layers.conv2D_layer_bn(conv6, 'conv7', num_filters=4096, kernel_size=(1,1), training=training)

    score5 = layers.conv2D_layer_bn(conv7, 'score5', num_filters=nlabels, kernel_size=(1,1), training=training)
    score4 = layers.conv2D_layer_bn(pool4, 'score4', num_filters=nlabels, kernel_size=(1,1), training=training)
    score3 = layers.conv2D_layer_bn(pool3, 'score3', num_filters=nlabels, kernel_size=(1,1), training=training)

    upscore1 = layers.deconv2D_layer_bn(score5, name='upscore1', kernel_size=(4,4), strides=(2,2), num_filters=nlabels, weight_init='bilinear', training=training)

    sum1 = tf.add(upscore1, score4)

    upscore2 = layers.deconv2D_layer_bn(sum1, name='upscore2', kernel_size=(4,4), strides=(2,2), num_filters=nlabels, weight_init='bilinear', training=training)

    sum2 = tf.add(upscore2, score3)

    upscore3 = layers.deconv2D_layer_bn(sum2, name='upscore3', kernel_size=(16,16), strides=(8,8), num_filters=nlabels, weight_init='bilinear', training=training, activation=tf.identity)

    return upscore3
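
Stride bookkeeping for the FCN-8 fusion above: five 2x poolings give a total stride of 32, and the three deconvolutions (x2, x2, x8) bring the prediction back to full resolution while fusing in the pool4 and pool3 scores.

stride = 2 ** 5      # after pool5
stride //= 2         # upscore1, fused with score4 (pool4 sits at stride 16)
stride //= 2         # upscore2, fused with score3 (pool3 sits at stride 8)
stride //= 8         # upscore3
assert stride == 1   # the output resolution matches the input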
Example #10
def unet2D_bn_padding_same_modified(images, training, nlabels):

    conv1_1 = layers.conv2D_layer_bn(images, 'conv1_1', num_filters=64, training=training)
    conv1_2 = layers.conv2D_layer_bn(conv1_1, 'conv1_2', num_filters=64, training=training)

    pool1 = layers.max_pool_layer2d(conv1_2)

    conv2_1 = layers.conv2D_layer_bn(pool1, 'conv2_1', num_filters=128, training=training)
    conv2_2 = layers.conv2D_layer_bn(conv2_1, 'conv2_2', num_filters=128, training=training)

    pool2 = layers.max_pool_layer2d(conv2_2)

    conv3_1 = layers.conv2D_layer_bn(pool2, 'conv3_1', num_filters=256, training=training)
    conv3_2 = layers.conv2D_layer_bn(conv3_1, 'conv3_2', num_filters=256, training=training)

    pool3 = layers.max_pool_layer2d(conv3_2)

    conv4_1 = layers.conv2D_layer_bn(pool3, 'conv4_1', num_filters=512, training=training)
    conv4_2 = layers.conv2D_layer_bn(conv4_1, 'conv4_2', num_filters=512, training=training)

    pool4 = layers.max_pool_layer2d(conv4_2)

    conv5_1 = layers.conv2D_layer_bn(pool4, 'conv5_1', num_filters=1024, training=training)
    conv5_2 = layers.conv2D_layer_bn(conv5_1, 'conv5_2', num_filters=1024, training=training)

    upconv4 = layers.deconv2D_layer_bn(conv5_2, name='upconv4', kernel_size=(4, 4), strides=(2, 2), num_filters=nlabels, training=training)
    concat4 = tf.concat([conv4_2, upconv4], axis=3, name='concat4')

    conv6_1 = layers.conv2D_layer_bn(concat4, 'conv6_1', num_filters=512, training=training)
    conv6_2 = layers.conv2D_layer_bn(conv6_1, 'conv6_2', num_filters=512, training=training)

    upconv3 = layers.deconv2D_layer_bn(conv6_2, name='upconv3', kernel_size=(4, 4), strides=(2, 2), num_filters=nlabels, training=training)
    concat3 = tf.concat([conv3_2, upconv3], axis=3, name='concat3')

    conv7_1 = layers.conv2D_layer_bn(concat3, 'conv7_1', num_filters=256, training=training)
    conv7_2 = layers.conv2D_layer_bn(conv7_1, 'conv7_2', num_filters=256, training=training)

    upconv2 = layers.deconv2D_layer_bn(conv7_2, name='upconv2', kernel_size=(4, 4), strides=(2, 2), num_filters=nlabels, training=training)
    concat2 = tf.concat([conv2_2, upconv2], axis=3, name='concat2')

    conv8_1 = layers.conv2D_layer_bn(concat2, 'conv8_1', num_filters=128, training=training)
    conv8_2 = layers.conv2D_layer_bn(conv8_1, 'conv8_2', num_filters=128, training=training)

    upconv1 = layers.deconv2D_layer_bn(conv8_2, name='upconv1', kernel_size=(4, 4), strides=(2, 2), num_filters=nlabels, training=training)
    concat1 = tf.concat([conv1_2, upconv1], axis=3, name='concat1')

    conv9_1 = layers.conv2D_layer_bn(concat1, 'conv9_1', num_filters=64, training=training)
    conv9_2 = layers.conv2D_layer_bn(conv9_1, 'conv9_2', num_filters=64, training=training)

    pred = layers.conv2D_layer_bn(conv9_2, 'pred', num_filters=nlabels, kernel_size=(1,1), activation=tf.identity, training=training)

    return pred
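
All of these networks use batch normalization through the *_bn layers, so in TensorFlow 1.x the moving averages must be updated alongside the optimizer step. A minimal training sketch, assuming the custom layers register their updates in tf.GraphKeys.UPDATE_OPS; the placeholder shapes and optimizer settings are assumptions.

import tensorflow as tf

images_pl = tf.placeholder(tf.float32, [None, 256, 256, 1])
labels_pl = tf.placeholder(tf.int32, [None, 256, 256])
training_pl = tf.placeholder(tf.bool)

pred = unet2D_bn_padding_same_modified(images_pl, training_pl, nlabels=4)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pl, logits=pred))

# Batch-norm moving-average updates live in UPDATE_OPS and must run with the step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)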