import numpy as np
import tensorflow as tf

# `layers` is the repository's local wrapper module providing the 3D layer
# helpers used below (conv3D_layer, dense_layer, maxpool3D_layer, ...); the
# exact import path here is an assumption.
from tfwrapper import layers


def only_conv_generator(z,
                        training,
                        residual=True,
                        batch_normalization=False,
                        hidden_layers=2,
                        filters=16,
                        input_noise_dim=0,
                        scope_name='generator',
                        scope_reuse=False):
    # suggested settings: batch size 2 -> hidden_layers=2, filters=16;
    #                     batch size 1 -> hidden_layers=3, filters=32
    # only a residual connection from the input straight to the output is supported
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        previous_layer = z
        if input_noise_dim >= 1:
            # draw uniform noise, pass it through a fully connected layer and
            # concatenate it with the input as an extra channel
            noise_in = tf.random_uniform(shape=[
                previous_layer.get_shape().as_list()[0], input_noise_dim
            ],
                                         minval=-1,
                                         maxval=1)
            # match the spatial dimensions of the image input; the channel
            # dimension of size 1 is added afterwards via expand_dims
            noise_channel_shape = previous_layer.shape[:-1]
            # the batch size stays constant
            fc_hidden_units = np.prod(noise_channel_shape[1:])
            fc_noise_layer = layers.dense_layer(noise_in,
                                                'fc_noise_layer',
                                                hidden_units=fc_hidden_units,
                                                activation=tf.identity)
            noise_channel = tf.reshape(fc_noise_layer, noise_channel_shape)
            noise_channel = tf.expand_dims(noise_channel, axis=-1)
            previous_layer = tf.concat([previous_layer, noise_channel],
                                       axis=-1)
        for depth in range(1, hidden_layers + 1):
            if (batch_normalization):
                previous_layer = layers.conv3D_layer_bn(previous_layer,
                                                        'gconv%d' % depth,
                                                        training,
                                                        num_filters=filters,
                                                        activation=tf.nn.relu)
            else:
                previous_layer = layers.conv3D_layer(previous_layer,
                                                     'gconv%d' % depth,
                                                     num_filters=filters,
                                                     activation=tf.nn.relu)
        last_layer = layers.conv3D_layer(previous_layer,
                                         'gconv%d_last' % (hidden_layers + 1),
                                         num_filters=1,
                                         kernel_size=(1, 1, 1),
                                         strides=(1, 1, 1),
                                         activation=tf.identity)
        if residual:
            return last_layer + z
        else:
            return last_layer
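def _demo_only_conv_generator():
    # Hedged usage sketch, not part of the original repository code: the
    # placeholder shapes, names and hyperparameters below are illustrative
    # assumptions for a 5-D volume batch in a TF1 graph.
    z_pl = tf.placeholder(tf.float32, shape=[2, 32, 32, 32, 1], name='z')
    training_pl = tf.placeholder(tf.bool, shape=[], name='training')
    # residual=True adds the single input-to-output skip connection, so the
    # convolutions only have to model the difference image
    fake = only_conv_generator(z_pl,
                               training_pl,
                               residual=True,
                               hidden_layers=2,
                               filters=16,
                               input_noise_dim=10)
    return fake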
def jia_xi_net(images,
               training,
               nlabels,
               scope_name='classifier',
               scope_reuse=False):

    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        conv1_1 = layers.conv3D_layer(images, 'conv1_1', num_filters=32)

        pool1 = layers.max_pool_layer3d(conv1_1)

        conv2_1 = layers.conv3D_layer(pool1, 'conv2_1', num_filters=64)

        pool2 = layers.max_pool_layer3d(conv2_1)

        conv3_1 = layers.conv3D_layer(pool2, 'conv3_1', num_filters=128)
        conv3_2 = layers.conv3D_layer(conv3_1, 'conv3_2', num_filters=128)

        pool3 = layers.max_pool_layer3d(conv3_2)

        conv4_1 = layers.conv3D_layer(pool3, 'conv4_1', num_filters=256)
        conv4_2 = layers.conv3D_layer(conv4_1, 'conv4_2', num_filters=256)

        pool4 = layers.max_pool_layer3d(conv4_2)

        dense1 = layers.dense_layer(pool4, 'dense1', hidden_units=512)
        dense2 = layers.dense_layer(dense1,
                                    'dense2',
                                    hidden_units=nlabels,
                                    activation=tf.identity)

        return dense2
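def _demo_jia_xi_net():
    # Hedged usage sketch, not part of the original repository code: the input
    # shape and label count are illustrative assumptions. The classifier
    # returns unnormalised logits of shape [batch, nlabels].
    images_pl = tf.placeholder(tf.float32,
                               shape=[2, 64, 64, 64, 1],
                               name='images')
    labels_pl = tf.placeholder(tf.int32, shape=[2], name='labels')
    training_pl = tf.placeholder(tf.bool, shape=[], name='training')
    logits = jia_xi_net(images_pl, training_pl, nlabels=2)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pl,
                                                       logits=logits))
    return logits, loss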
def C3D_fcn_16(x, training, scope_name='critic', scope_reuse=False):

    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        conv1_1 = layers.conv3D_layer(x, 'conv1_1', num_filters=16)

        pool1 = layers.maxpool3D_layer(conv1_1)

        conv2_1 = layers.conv3D_layer(pool1, 'conv2_1', num_filters=32)

        pool2 = layers.maxpool3D_layer(conv2_1)

        conv3_1 = layers.conv3D_layer(pool2, 'conv3_1', num_filters=64)
        conv3_2 = layers.conv3D_layer(conv3_1, 'conv3_2', num_filters=64)

        pool3 = layers.maxpool3D_layer(conv3_2)

        conv4_1 = layers.conv3D_layer(pool3, 'conv4_1', num_filters=128)
        conv4_2 = layers.conv3D_layer(conv4_1, 'conv4_2', num_filters=128)

        pool4 = layers.maxpool3D_layer(conv4_2)

        conv5_1 = layers.conv3D_layer(pool4, 'conv5_1', num_filters=256)
        conv5_2 = layers.conv3D_layer(conv5_1, 'conv5_2', num_filters=256)

        convD_1 = layers.conv3D_layer(conv5_2, 'convD_1', num_filters=256)
        convD_2 = layers.conv3D_layer(convD_1,
                                      'convD_2',
                                      num_filters=1,
                                      kernel_size=(1, 1, 1),
                                      activation=tf.identity)

        logits = layers.averagepool3D_layer(convD_2, name='diagnosis_avg')

    return logits
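def _demo_critic_scope_reuse():
    # Hedged usage sketch, not part of the original repository code: shows the
    # scope_reuse pattern used throughout this file, i.e. applying the same
    # critic weights to real and generated volumes (e.g. in a Wasserstein GAN
    # setup). Shapes and names are illustrative assumptions.
    real_pl = tf.placeholder(tf.float32, shape=[2, 64, 64, 64, 1], name='real')
    fake_pl = tf.placeholder(tf.float32, shape=[2, 64, 64, 64, 1], name='fake')
    training_pl = tf.placeholder(tf.bool, shape=[], name='training')
    d_real = C3D_fcn_16(real_pl, training_pl, scope_reuse=False)
    # the second call reuses the variables created by the first call
    d_fake = C3D_fcn_16(fake_pl, training_pl, scope_reuse=True)
    critic_loss = tf.reduce_mean(d_fake) - tf.reduce_mean(d_real)
    return critic_loss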
def jia_xi_net_multitask_ordinal(images,
                                 training,
                                 nlabels,
                                 n_age_thresholds=5,
                                 scope_name='classifier',
                                 scope_reuse=False):
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        conv1_1 = layers.conv3D_layer(images, 'conv1_1', num_filters=32)

        pool1 = layers.max_pool_layer3d(conv1_1)

        conv2_1 = layers.conv3D_layer(pool1, 'conv2_1', num_filters=64)

        pool2 = layers.max_pool_layer3d(conv2_1)

        conv3_1 = layers.conv3D_layer(pool2, 'conv3_1', num_filters=128)
        conv3_2 = layers.conv3D_layer(conv3_1, 'conv3_2', num_filters=128)

        pool3 = layers.max_pool_layer3d(conv3_2)

        conv4_1 = layers.conv3D_layer(pool3, 'conv4_1', num_filters=256)
        conv4_2 = layers.conv3D_layer(conv4_1, 'conv4_2', num_filters=256)

        pool4 = layers.max_pool_layer3d(conv4_2)

        dense1 = layers.dense_layer(pool4, 'dense1', hidden_units=512)
        diagnosis = layers.dense_layer(dense1,
                                       'dense2',
                                       hidden_units=nlabels,
                                       activation=tf.identity)

        dense_ages = layers.dense_layer(pool4, 'dense_ages', hidden_units=512)

        ages_logits = []
        for ii in range(n_age_thresholds):
            ages_logits.append(
                layers.dense_layer(dense_ages,
                                   'age_%s' % str(ii),
                                   hidden_units=2,
                                   activation=tf.identity))

        return diagnosis, ages_logits
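def _demo_ordinal_age_prediction(ages_logits):
    # Hedged usage sketch, not part of the original repository code: each of
    # the n_age_thresholds heads returned by jia_xi_net_multitask_ordinal is a
    # 2-way classifier predicting whether the subject's age exceeds the
    # corresponding threshold, so counting the predicted "exceeds" decisions
    # yields an ordinal age bucket in [0, n_age_thresholds].
    exceeded = [tf.argmax(logits, axis=-1) for logits in ages_logits]
    age_bucket = tf.add_n(exceeded)
    return age_bucket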
def unet_16_bn(x, training, scope_name='generator'):

    n_ch_0 = 16

    with tf.variable_scope(scope_name):

        conv1_1 = layers.conv3D_layer_bn(x,
                                         'conv1_1',
                                         num_filters=n_ch_0,
                                         training=training)
        conv1_2 = layers.conv3D_layer_bn(conv1_1,
                                         'conv1_2',
                                         num_filters=n_ch_0,
                                         training=training)
        pool1 = layers.maxpool3D_layer(conv1_2)

        conv2_1 = layers.conv3D_layer_bn(pool1,
                                         'conv2_1',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        conv2_2 = layers.conv3D_layer_bn(conv2_1,
                                         'conv2_2',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        pool2 = layers.maxpool3D_layer(conv2_2)

        conv3_1 = layers.conv3D_layer_bn(pool2,
                                         'conv3_1',
                                         num_filters=n_ch_0 * 4,
                                         training=training)
        conv3_2 = layers.conv3D_layer_bn(conv3_1,
                                         'conv3_2',
                                         num_filters=n_ch_0 * 4,
                                         training=training)
        pool3 = layers.maxpool3D_layer(conv3_2)

        conv4_1 = layers.conv3D_layer_bn(pool3,
                                         'conv4_1',
                                         num_filters=n_ch_0 * 8,
                                         training=training)
        conv4_2 = layers.conv3D_layer_bn(conv4_1,
                                         'conv4_2',
                                         num_filters=n_ch_0 * 8,
                                         training=training)

        upconv3 = layers.deconv3D_layer_bn(conv4_2,
                                           name='upconv3',
                                           num_filters=n_ch_0,
                                           training=training)
        concat3 = layers.crop_and_concat_layer([upconv3, conv3_2], axis=-1)

        conv5_1 = layers.conv3D_layer_bn(concat3,
                                         'conv5_1',
                                         num_filters=n_ch_0 * 4,
                                         training=training)

        conv5_2 = layers.conv3D_layer_bn(conv5_1,
                                         'conv5_2',
                                         num_filters=n_ch_0 * 4,
                                         training=training)

        upconv2 = layers.deconv3D_layer_bn(conv5_2,
                                           name='upconv2',
                                           num_filters=n_ch_0,
                                           training=training)
        concat2 = layers.crop_and_concat_layer([upconv2, conv2_2], axis=-1)

        conv6_1 = layers.conv3D_layer_bn(concat2,
                                         'conv6_1',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        conv6_2 = layers.conv3D_layer_bn(conv6_1,
                                         'conv6_2',
                                         num_filters=n_ch_0 * 2,
                                         training=training)

        upconv1 = layers.deconv3D_layer_bn(conv6_2,
                                           name='upconv1',
                                           num_filters=n_ch_0,
                                           training=training)
        concat1 = layers.crop_and_concat_layer([upconv1, conv1_2], axis=-1)

        conv8_1 = layers.conv3D_layer_bn(concat1,
                                         'conv8_1',
                                         num_filters=n_ch_0,
                                         training=training)
        conv8_2 = layers.conv3D_layer(conv8_1,
                                      'conv8_2',
                                      num_filters=1,
                                      activation=tf.identity)

    return conv8_2
def g_encoder_decoder_skip_notanh(z,
                                  training,
                                  scope_name='generator',
                                  scope_reuse=False):
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        layer1 = layers.conv3D_layer_bn(z,
                                        'glayer1',
                                        num_filters=32,
                                        training=training,
                                        kernel_size=(4, 4, 4),
                                        strides=(2, 2, 2))
        layer2 = layers.conv3D_layer_bn(layer1,
                                        'glayer2',
                                        num_filters=64,
                                        training=training,
                                        kernel_size=(4, 4, 4),
                                        strides=(2, 2, 2))
        layer3 = layers.conv3D_layer_bn(layer2,
                                        'glayer3',
                                        num_filters=128,
                                        training=training,
                                        kernel_size=(4, 4, 4),
                                        strides=(2, 2, 2))
        layer4 = layers.conv3D_layer_bn(layer3,
                                        'glayer4',
                                        num_filters=256,
                                        training=training,
                                        kernel_size=(4, 4, 4),
                                        strides=(2, 2, 2))

        layer5 = layers.deconv3D_layer_bn(layer4,
                                          name='glayer5',
                                          kernel_size=(4, 4, 4),
                                          strides=(2, 2, 2),
                                          num_filters=256,
                                          training=training)
        layer6 = layers.deconv3D_layer_bn(layer5,
                                          name='glayer6',
                                          kernel_size=(4, 4, 4),
                                          strides=(2, 2, 2),
                                          num_filters=128,
                                          training=training)
        layer7 = layers.deconv3D_layer_bn(layer6,
                                          name='glayer7',
                                          kernel_size=(4, 4, 4),
                                          strides=(2, 2, 2),
                                          num_filters=64,
                                          training=training)
        layer8 = layers.deconv3D_layer_bn(layer7,
                                          name='glayer8',
                                          kernel_size=(4, 4, 4),
                                          strides=(2, 2, 2),
                                          num_filters=32,
                                          training=training)

        layer9 = layers.conv3D_layer(layer8,
                                     'glayer9',
                                     num_filters=1,
                                     kernel_size=(1, 1, 1),
                                     activation=tf.identity)

        return z + layer9
def pool_fc_discriminator_bs2(x,
                              training,
                              scope_name='discriminator',
                              scope_reuse=False):
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        conv1_1 = layers.conv3D_layer(x,
                                      'dconv1_1',
                                      kernel_size=(3, 3, 3),
                                      num_filters=8,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        pool1 = layers.max_pool_layer3d(conv1_1)

        conv2_1 = layers.conv3D_layer(pool1,
                                      'dconv2_1',
                                      kernel_size=(3, 3, 3),
                                      num_filters=16,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        pool2 = layers.max_pool_layer3d(conv2_1)

        conv3_1 = layers.conv3D_layer(pool2,
                                      'dconv3_1',
                                      kernel_size=(3, 3, 3),
                                      num_filters=16,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        conv3_2 = layers.conv3D_layer_bn(conv3_1,
                                         'dconv3_2',
                                         kernel_size=(3, 3, 3),
                                         num_filters=16,
                                         strides=(1, 1, 1),
                                         activation=layers.leaky_relu,
                                         training=training)

        pool3 = layers.max_pool_layer3d(conv3_2)

        conv4_1 = layers.conv3D_layer(pool3,
                                      'dconv4_1',
                                      kernel_size=(3, 3, 3),
                                      num_filters=16,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        conv4_2 = layers.conv3D_layer(conv4_1,
                                      'dconv4_2',
                                      kernel_size=(3, 3, 3),
                                      num_filters=16,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        pool4 = layers.max_pool_layer3d(conv4_2)

        conv5_1 = layers.conv3D_layer(pool4,
                                      'dconv5_1',
                                      kernel_size=(3, 3, 3),
                                      num_filters=16,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        conv5_2 = layers.conv3D_layer(conv5_1,
                                      'dconv5_2',
                                      kernel_size=(3, 3, 3),
                                      num_filters=16,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        pool5 = layers.max_pool_layer3d(conv5_2)

        conv6_1 = layers.conv3D_layer(pool5,
                                      'dconv6_1',
                                      kernel_size=(3, 3, 3),
                                      num_filters=16,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        conv6_2 = layers.conv3D_layer(conv6_1,
                                      'dconv6_2',
                                      kernel_size=(3, 3, 3),
                                      num_filters=16,
                                      strides=(1, 1, 1),
                                      activation=layers.leaky_relu)

        pool6 = layers.max_pool_layer3d(conv6_2)

        dense1 = layers.dense_layer(pool6,
                                    'ddense1',
                                    hidden_units=256,
                                    activation=layers.leaky_relu)

        dense2 = layers.dense_layer(dense1,
                                    'ddense2',
                                    hidden_units=1,
                                    activation=tf.identity)

        return dense2
def bousmalis_discriminator(x,
                            training,
                            batch_normalization,
                            middle_layers,
                            initial_filters,
                            dropout_start=3,
                            scope_name='discriminator',
                            scope_reuse=False):
    # leaky ReLU with the same slope (alpha=0.2) as in the paper
    leaky_relu = lambda x: layers.leaky_relu(x, alpha=0.2)
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        if batch_normalization:
            previous_layer = layers.conv3D_layer_bn(
                x,
                'convs1_1',
                kernel_size=(3, 3, 3),
                num_filters=initial_filters,
                strides=(1, 1, 1),
                activation=leaky_relu,
                training=training)
        else:
            previous_layer = layers.conv3D_layer(x,
                                                 'convs1_1',
                                                 kernel_size=(3, 3, 3),
                                                 num_filters=initial_filters,
                                                 strides=(1, 1, 1),
                                                 activation=leaky_relu)

        for current_layer in range(2, 2 + middle_layers):
            num_filters = initial_filters * (2**(current_layer - 1))
            if batch_normalization:
                previous_layer = layers.conv3D_layer_bn(
                    previous_layer,
                    'convs2_' + str(current_layer),
                    kernel_size=(3, 3, 3),
                    num_filters=num_filters,
                    strides=(2, 2, 2),
                    activation=leaky_relu,
                    training=training)
            else:
                previous_layer = layers.conv3D_layer(previous_layer,
                                                     'convs2_' +
                                                     str(current_layer),
                                                     kernel_size=(3, 3, 3),
                                                     num_filters=num_filters,
                                                     strides=(2, 2, 2),
                                                     activation=leaky_relu)
            if current_layer >= dropout_start:
                previous_layer = layers.dropout_layer(previous_layer,
                                                      'dropout_' +
                                                      str(current_layer),
                                                      training,
                                                      keep_prob=0.9)

        dense_out = layers.dense_layer(previous_layer,
                                       'dense_out',
                                       hidden_units=1,
                                       activation=tf.identity)

    return dense_out
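def _demo_bousmalis_discriminator():
    # Hedged usage sketch, not part of the original repository code: with
    # initial_filters=16 and middle_layers=4, the strided middle convolutions
    # use 32, 64, 128 and 256 filters (doubling per layer), and dropout is
    # applied after convs2_3, convs2_4 and convs2_5
    # (current_layer >= dropout_start). Shapes are illustrative assumptions.
    x_pl = tf.placeholder(tf.float32, shape=[2, 64, 64, 64, 1], name='x')
    training_pl = tf.placeholder(tf.bool, shape=[], name='training')
    d_logits = bousmalis_discriminator(x_pl,
                                       training_pl,
                                       batch_normalization=False,
                                       middle_layers=4,
                                       initial_filters=16,
                                       dropout_start=3)
    return d_logits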
def bousmalis_generator(x,
                        z_noise,
                        training,
                        batch_normalization,
                        residual_blocks,
                        nfilters,
                        last_activation=tf.nn.tanh,
                        scope_name='generator',
                        scope_reuse=False):
    kernel_size = (3, 3, 3)
    strides = (1, 1, 1)
    # convolution layer constructor used inside the residual blocks
    if batch_normalization:
        conv_layer = lambda bottom, name, activation: layers.conv3D_layer_bn(
            bottom,
            name,
            training=training,
            kernel_size=kernel_size,
            num_filters=nfilters,
            strides=strides,
            activation=activation)
    else:
        conv_layer = lambda bottom, name, activation: layers.conv3D_layer(
            bottom,
            name,
            kernel_size=kernel_size,
            num_filters=nfilters,
            strides=strides,
            activation=activation)
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        x_conv_in = x
        if z_noise is not None:
            # match the spatial dimensions of the image input; the channel
            # dimension of size 1 is added afterwards via expand_dims
            noise_channel_shape = x.shape[:-1]
            # the batch size stays constant
            fc_hidden_units = np.prod(noise_channel_shape[1:])
            fc_noise_layer = layers.dense_layer(z_noise,
                                                'fc_noise_layer',
                                                hidden_units=fc_hidden_units,
                                                activation=tf.identity)
            noise_channel = tf.reshape(fc_noise_layer, noise_channel_shape)
            noise_channel = tf.expand_dims(noise_channel, axis=-1)
            x_conv_in = tf.concat([x, noise_channel], axis=-1)
        previous_layer = layers.conv3D_layer(x_conv_in,
                                             'conv1',
                                             kernel_size=kernel_size,
                                             num_filters=nfilters,
                                             strides=strides,
                                             activation=tf.nn.relu)

        # place residual blocks
        for block_num in range(1, 1 + residual_blocks):
            previous_layer = layers.residual_block_original(
                previous_layer,
                'res_block_' + str(block_num),
                conv_layer,
                activation=tf.nn.relu,
                nlayers=2)

        conv_out = layers.conv3D_layer(previous_layer,
                                       'conv_out',
                                       kernel_size=kernel_size,
                                       num_filters=1,
                                       strides=strides,
                                       activation=last_activation)
        return conv_out
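def _demo_bousmalis_generator():
    # Hedged usage sketch, not part of the original repository code: the noise
    # vector is mapped through a fully connected layer, reshaped to the
    # spatial size of the input and concatenated as an extra channel before
    # the first convolution. The shapes and noise dimension are illustrative
    # assumptions.
    x_pl = tf.placeholder(tf.float32, shape=[2, 32, 32, 32, 1], name='x')
    z_noise = tf.random_uniform(shape=[2, 10], minval=-1, maxval=1)
    training_pl = tf.placeholder(tf.bool, shape=[], name='training')
    generated = bousmalis_generator(x_pl,
                                    z_noise,
                                    training_pl,
                                    batch_normalization=True,
                                    residual_blocks=4,
                                    nfilters=32,
                                    last_activation=tf.nn.tanh)
    return generated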