def C2D_fcn_16_body(x, training, scope_name='classifier', scope_reuse=False):
    """Fully-convolutional 2D classifier body.

    Stacks conv/max-pool stages (16 -> 32 -> 64 -> 128 -> 256 filters) and
    reduces the final feature map to per-channel logits by global average
    pooling over the spatial axes.

    Args:
        x: input tensor; assumed NHWC — TODO confirm against layers module.
        training: training-mode flag (currently unused by this body).
        scope_name: variable scope to create/reuse.
        scope_reuse: if True, reuse variables in the scope.

    Returns:
        Tensor of shape (batch, 256) with spatially averaged activations.
    """
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        net = layers.conv2D_layer(x, 'conv1_1', num_filters=16)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv2_1', num_filters=32)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv3_1', num_filters=64)
        net = layers.conv2D_layer(net, 'conv3_2', num_filters=64)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv4_1', num_filters=128)
        net = layers.conv2D_layer(net, 'conv4_2', num_filters=128)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv5_1', num_filters=256)
        net = layers.conv2D_layer(net, 'conv5_2', num_filters=256)

        net = layers.conv2D_layer(net, 'convD_1', num_filters=256)

        # Global average over height and width collapses the map to logits.
        logits = tf.reduce_mean(net, axis=(1, 2))

    return logits
# Beispiel #2 (scrape-artifact separator; original snippet score: 0)
def C3D_fcn_16_2D_conditioned_with_delta(x,
                                         training,
                                         scope_name='critic',
                                         scope_reuse=False):
    """Conditioned 2D critic that also consumes a scalar delta channel.

    The input packs three channels along the last axis:
      channel 0 — the image fed through the conv trunk,
      channel 1 — a delta value broadcast over the spatial grid,
      channel 2 — the difference map used as the condition.

    Args:
        x: input tensor of shape (batch, H, W, 3) — assumed; confirm at caller.
        training: training-mode flag, forwarded to the condition sub-network.
        scope_name: variable scope for this critic.
        scope_reuse: if True, reuse variables (also forwarded to sub-network
            and the final dense layer).

    Returns:
        Tensor of shape (batch, 1): the critic decision.
    """
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        # Discriminator is conditioned on the difference map (channel 2).
        condition = x[:, :, :, 2:3]
        # BUG FIX: the original read x[:, :, :1:2] (missing a comma), which
        # sliced the *height* axis rather than the channel axis. Channel 1
        # holds the broadcast delta.
        delta_img = x[:, :, :, 1:2]
        # Delta is constant over the spatial grid, so one pixel suffices.
        delta = delta_img[:, 0, 0, :]
        x = x[:, :, :, 0:1]

        conv1_1 = layers.conv2D_layer(x, 'conv1_1', num_filters=16)

        pool1 = layers.maxpool2D_layer(conv1_1)

        conv2_1 = layers.conv2D_layer(pool1, 'conv2_1', num_filters=32)

        pool2 = layers.maxpool2D_layer(conv2_1)

        conv3_1 = layers.conv2D_layer(pool2, 'conv3_1', num_filters=64)
        conv3_2 = layers.conv2D_layer(conv3_1, 'conv3_2', num_filters=64)

        pool3 = layers.maxpool2D_layer(conv3_2)

        conv4_1 = layers.conv2D_layer(pool3, 'conv4_1', num_filters=128)
        conv4_2 = layers.conv2D_layer(conv4_1, 'conv4_2', num_filters=128)

        pool4 = layers.maxpool2D_layer(conv4_2)

        conv5_1 = layers.conv2D_layer(pool4, 'conv5_1', num_filters=256)
        conv5_2 = layers.conv2D_layer(conv5_1, 'conv5_2', num_filters=256)

        convD_1 = layers.conv2D_layer(conv5_2, 'convD_1', num_filters=256)
        # NOTE(review): kernel_size is a 3-tuple for a 2D conv layer — this is
        # consistent across the file, but verify layers.conv2D_layer accepts it.
        convD_2 = layers.conv2D_layer(convD_1,
                                      'convD_2',
                                      num_filters=1,
                                      kernel_size=(1, 1, 1),
                                      activation=tf.identity)

        logits = layers.averagepool2D_layer(convD_2, name='diagnosis_avg')

        # Run the condition channel through its own trunk (separate scope).
        condition_logits = C3D_fcn_16_2D(condition,
                                         training,
                                         scope_name="critic_condition",
                                         scope_reuse=scope_reuse)

        all_logits = tf.concat([logits, condition_logits, delta], axis=-1)
        decision = tf.layers.dense(all_logits, 1, reuse=scope_reuse)

    return decision
# Beispiel #3 (scrape-artifact separator; original snippet score: 0)
def C3D_fcn_16_2D_conditioned(x,
                              training,
                              scope_name='critic',
                              scope_reuse=False):
    """Conditioned 2D critic.

    Splits the input along the channel axis: channel 0 is scored by the
    conv trunk, channel 1 (the condition) is scored by a separate trunk
    (scope 'critic_condition'); a dense layer fuses both into one decision.

    Args:
        x: input tensor of shape (batch, H, W, 2) — assumed; confirm at caller.
        training: training-mode flag, forwarded to the condition sub-network.
        scope_name: variable scope for this critic.
        scope_reuse: if True, reuse variables.

    Returns:
        Tensor of shape (batch, 1): the critic decision.
    """
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        # Separate the condition channel from the scored channel.
        condition = x[:, :, :, 1:2]
        x = x[:, :, :, 0:1]

        net = layers.conv2D_layer(x, 'conv1_1', num_filters=16)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv2_1', num_filters=32)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv3_1', num_filters=64)
        net = layers.conv2D_layer(net, 'conv3_2', num_filters=64)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv4_1', num_filters=128)
        net = layers.conv2D_layer(net, 'conv4_2', num_filters=128)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv5_1', num_filters=256)
        net = layers.conv2D_layer(net, 'conv5_2', num_filters=256)

        net = layers.conv2D_layer(net, 'convD_1', num_filters=256)
        net = layers.conv2D_layer(net,
                                  'convD_2',
                                  num_filters=1,
                                  kernel_size=(1, 1, 1),
                                  activation=tf.identity)

        logits = layers.averagepool2D_layer(net, name='diagnosis_avg')

        # Condition channel gets its own trunk in a dedicated scope.
        condition_logits = C3D_fcn_16_2D(condition,
                                         training,
                                         scope_name="critic_condition",
                                         scope_reuse=scope_reuse)

        fused = tf.concat([logits, condition_logits], axis=-1)
        decision = tf.layers.dense(fused, 1, reuse=scope_reuse)

    return decision
# Beispiel #4 (scrape-artifact separator; original snippet score: 0)
def C3D_fcn_16_2D_body(x, training, scope_name='critic', scope_reuse=False):
    """Shallow conv trunk: three conv/pool stages (16 -> 32 -> 64 filters).

    Args:
        x: input tensor; assumed NHWC — TODO confirm against layers module.
        training: training-mode flag (unused by this body).
        scope_name: variable scope to create/reuse.
        scope_reuse: if True, reuse variables in the scope.

    Returns:
        Feature map after the third max-pool (64 channels).
    """
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        net = layers.conv2D_layer(x, 'conv1_1', num_filters=16)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv2_1', num_filters=32)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv3_1', num_filters=64)
        net = layers.conv2D_layer(net, 'conv3_2', num_filters=64)
        net = layers.maxpool2D_layer(net)

    return net
# Beispiel #5 (scrape-artifact separator; original snippet score: 0)
def C3D_fcn_16_2D(x, training, scope_name='critic', scope_reuse=False):
    """Fully-convolutional critic: conv/pool trunk ending in a 1-filter
    conv and spatial average pooling.

    Args:
        x: input tensor; assumed NHWC — TODO confirm against layers module.
        training: training-mode flag (unused by this body).
        scope_name: variable scope to create/reuse.
        scope_reuse: if True, reuse variables in the scope.

    Returns:
        Spatially averaged single-channel logits ('diagnosis_avg').
    """
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        net = layers.conv2D_layer(x, 'conv1_1', num_filters=16)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv2_1', num_filters=32)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv3_1', num_filters=64)
        net = layers.conv2D_layer(net, 'conv3_2', num_filters=64)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv4_1', num_filters=128)
        net = layers.conv2D_layer(net, 'conv4_2', num_filters=128)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer(net, 'conv5_1', num_filters=256)
        net = layers.conv2D_layer(net, 'conv5_2', num_filters=256)

        net = layers.conv2D_layer(net, 'convD_1', num_filters=256)
        # Collapse to one channel with a linear (identity) activation.
        net = layers.conv2D_layer(net,
                                  'convD_2',
                                  num_filters=1,
                                  kernel_size=(1, 1, 1),
                                  activation=tf.identity)

        logits = layers.averagepool2D_layer(net, name='diagnosis_avg')

    return logits
def unet_16_2D_bn(x, training, scope_name='generator'):
    """2D U-Net generator with batch-normalized conv layers.

    Encoder: 16 -> 32 -> 64 -> 128 filters with three max-pools.
    Decoder: three deconv + skip-concat stages; the final conv is linear
    (identity activation) and emits a single output channel.

    Args:
        x: input tensor; assumed NHWC — TODO confirm against layers module.
        training: training-mode flag passed to the batch-norm layers.
        scope_name: variable scope for the generator.

    Returns:
        Single-channel output tensor ('conv8_2').
    """
    nf = 16  # base filter count

    with tf.variable_scope(scope_name):
        # --- encoder -------------------------------------------------------
        enc1 = layers.conv2D_layer_bn(x,
                                      'conv1_1',
                                      num_filters=nf,
                                      training=training)
        enc1 = layers.conv2D_layer_bn(enc1,
                                      'conv1_2',
                                      num_filters=nf,
                                      training=training)
        down1 = layers.maxpool2D_layer(enc1)

        enc2 = layers.conv2D_layer_bn(down1,
                                      'conv2_1',
                                      num_filters=nf * 2,
                                      training=training)
        enc2 = layers.conv2D_layer_bn(enc2,
                                      'conv2_2',
                                      num_filters=nf * 2,
                                      training=training)
        down2 = layers.maxpool2D_layer(enc2)

        enc3 = layers.conv2D_layer_bn(down2,
                                      'conv3_1',
                                      num_filters=nf * 4,
                                      training=training)
        enc3 = layers.conv2D_layer_bn(enc3,
                                      'conv3_2',
                                      num_filters=nf * 4,
                                      training=training)
        down3 = layers.maxpool2D_layer(enc3)

        # --- bottleneck ----------------------------------------------------
        bottom = layers.conv2D_layer_bn(down3,
                                        'conv4_1',
                                        num_filters=nf * 8,
                                        training=training)
        bottom = layers.conv2D_layer_bn(bottom,
                                        'conv4_2',
                                        num_filters=nf * 8,
                                        training=training)

        # --- decoder (deconv, then concat with the matching encoder map) ---
        up3 = layers.deconv2D_layer_bn(bottom,
                                       name='upconv3',
                                       num_filters=nf,
                                       training=training)
        merge3 = layers.crop_and_concat_layer_fixed([up3, enc3], axis=-1)

        dec3 = layers.conv2D_layer_bn(merge3,
                                      'conv5_1',
                                      num_filters=nf * 4,
                                      training=training)
        dec3 = layers.conv2D_layer_bn(dec3,
                                      'conv5_2',
                                      num_filters=nf * 4,
                                      training=training)

        up2 = layers.deconv2D_layer_bn(dec3,
                                       name='upconv2',
                                       num_filters=nf,
                                       training=training)
        merge2 = layers.crop_and_concat_layer_fixed([up2, enc2], axis=-1)

        dec2 = layers.conv2D_layer_bn(merge2,
                                      'conv6_1',
                                      num_filters=nf * 2,
                                      training=training)
        dec2 = layers.conv2D_layer_bn(dec2,
                                      'conv6_2',
                                      num_filters=nf * 2,
                                      training=training)

        up1 = layers.deconv2D_layer_bn(dec2,
                                       name='upconv1',
                                       num_filters=nf,
                                       training=training)
        merge1 = layers.crop_and_concat_layer_fixed([up1, enc1], axis=-1)

        out = layers.conv2D_layer_bn(merge1,
                                     'conv8_1',
                                     num_filters=nf,
                                     training=training)
        # Final layer is plain (no BN) and linear.
        out = layers.conv2D_layer(out,
                                  'conv8_2',
                                  num_filters=1,
                                  activation=tf.identity)

    return out
def unet_16_2D_allow_reuse(x,
                           training,
                           scope_name='generator',
                           scope_reuse=True):
    """2D U-Net generator (no batch norm) whose scope always allows reuse.

    The variable scope is opened with ``reuse=tf.AUTO_REUSE``, so variables
    are created on first call and reused afterwards automatically.

    Args:
        x: input tensor; assumed NHWC — TODO confirm against layers module.
        training: training-mode flag (unused by this body).
        scope_name: variable scope for the generator.
        scope_reuse: ignored — kept only for signature compatibility with
            the other network constructors in this file; AUTO_REUSE handles
            reuse regardless of this flag.

    Returns:
        Single-channel output tensor ('conv8_2').
    """
    n_ch_0 = 16

    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
        conv1_1 = layers.conv2D_layer(x, 'conv1_1', num_filters=n_ch_0)
        conv1_2 = layers.conv2D_layer(conv1_1, 'conv1_2', num_filters=n_ch_0)
        pool1 = layers.maxpool2D_layer(conv1_2)

        conv2_1 = layers.conv2D_layer(pool1, 'conv2_1', num_filters=n_ch_0 * 2)
        conv2_2 = layers.conv2D_layer(conv2_1,
                                      'conv2_2',
                                      num_filters=n_ch_0 * 2)
        pool2 = layers.maxpool2D_layer(conv2_2)

        conv3_1 = layers.conv2D_layer(pool2, 'conv3_1', num_filters=n_ch_0 * 4)
        conv3_2 = layers.conv2D_layer(conv3_1,
                                      'conv3_2',
                                      num_filters=n_ch_0 * 4)
        pool3 = layers.maxpool2D_layer(conv3_2)

        conv4_1 = layers.conv2D_layer(pool3, 'conv4_1', num_filters=n_ch_0 * 8)
        conv4_2 = layers.conv2D_layer(conv4_1,
                                      'conv4_2',
                                      num_filters=n_ch_0 * 8)

        upconv3 = layers.deconv2D_layer(conv4_2,
                                        name='upconv3',
                                        num_filters=n_ch_0)
        concat3 = layers.crop_and_concat_layer_fixed([upconv3, conv3_2],
                                                     axis=-1)

        conv5_1 = layers.conv2D_layer(concat3,
                                      'conv5_1',
                                      num_filters=n_ch_0 * 4)

        conv5_2 = layers.conv2D_layer(conv5_1,
                                      'conv5_2',
                                      num_filters=n_ch_0 * 4)

        upconv2 = layers.deconv2D_layer(conv5_2,
                                        name='upconv2',
                                        num_filters=n_ch_0)
        concat2 = layers.crop_and_concat_layer_fixed([upconv2, conv2_2],
                                                     axis=-1)

        conv6_1 = layers.conv2D_layer(concat2,
                                      'conv6_1',
                                      num_filters=n_ch_0 * 2)
        conv6_2 = layers.conv2D_layer(conv6_1,
                                      'conv6_2',
                                      num_filters=n_ch_0 * 2)

        upconv1 = layers.deconv2D_layer(conv6_2,
                                        name='upconv1',
                                        num_filters=n_ch_0)
        concat1 = layers.crop_and_concat_layer_fixed([upconv1, conv1_2],
                                                     axis=-1)

        conv8_1 = layers.conv2D_layer(concat1, 'conv8_1', num_filters=n_ch_0)
        # Linear output head, single channel.
        conv8_2 = layers.conv2D_layer(conv8_1,
                                      'conv8_2',
                                      num_filters=1,
                                      activation=tf.identity)

    return conv8_2