Ejemplo n.º 1
0
def betaVAE_bn(x,
               s_oh,
               zdim_0,
               training,
               scope_reuse=False,
               norm=tfnorm.batch_norm,
               **kwargs):
    """Beta-VAE posterior (encoder) with batch normalisation.

    Encodes the image `x` concatenated with the centred one-hot labels
    `s_oh` through a strided conv pyramid into a single latent Gaussian.

    Returns:
        z, mu_z, sigma_z: single-element lists, where
        z[0] = mu_z[0] + sigma_z[0] * eps with eps ~ N(0, I).
    """

    resolution_levels = kwargs.get('resolution_levels', 5)
    in_size = x.get_shape().as_list()[1:3]
    # Kernel that collapses the remaining spatial extent in one VALID conv.
    final_kernel_size = [d // (2**(resolution_levels - 1)) for d in in_size]

    # POSTERIOR ####################
    with tf.variable_scope('posterior') as scope:

        if scope_reuse:
            scope.reuse_variables()

        n0 = kwargs.get('n0', 32)

        # Image and shifted one-hot segmentation are encoded jointly.
        net = tf.concat([x, s_oh - 0.5], axis=-1)

        # Strided conv pyramid: resolution halves at every level.
        for lvl in range(resolution_levels - 1):
            net = layers.conv2D(net,
                                'q_z_%d' % lvl,
                                num_filters=n0 * (lvl // 2 + 1),
                                kernel_size=(4, 4),
                                strides=(2, 2),
                                normalisation=norm,
                                training=training)

        # NOTE(review): scope index skips resolution_levels-1 and uses
        # resolution_levels — kept as-is so existing checkpoints still load.
        net = layers.conv2D(net,
                            'q_z_%d' % resolution_levels,
                            num_filters=n0 * 8,
                            kernel_size=final_kernel_size,
                            strides=(1, 1),
                            padding='VALID',
                            normalisation=norm,
                            training=training)

        mu = layers.dense_layer(net,
                                'z_mu',
                                hidden_units=zdim_0,
                                activation=tf.identity)
        sigma = layers.dense_layer(net,
                                   'z_sigma',
                                   hidden_units=zdim_0,
                                   activation=tf.nn.softplus)

        # Reparameterisation trick.
        eps = tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)
        sample = mu + sigma * eps

        mu_z = [mu]
        sigma_z = [sigma]
        z = [sample]

    return z, mu_z, sigma_z
Ejemplo n.º 2
0
def jia_xi_net(images,
               training,
               nlabels,
               scope_name='classifier',
               scope_reuse=False):
    """3D conv classifier: four conv/pool stages followed by two dense layers."""

    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        net = images
        # Per stage: (number of filters, number of conv layers before pooling).
        for stage, (n_filters, n_convs) in enumerate(
                [(32, 1), (64, 1), (128, 2), (256, 2)], start=1):
            for conv_idx in range(1, n_convs + 1):
                net = layers.conv3D_layer(net,
                                          'conv%d_%d' % (stage, conv_idx),
                                          num_filters=n_filters)
            net = layers.max_pool_layer3d(net)

        net = layers.dense_layer(net, 'dense1', hidden_units=512)
        logits = layers.dense_layer(net,
                                    'dense2',
                                    hidden_units=nlabels,
                                    activation=tf.identity)

        return logits
Ejemplo n.º 3
0
def betaVAE_bn(z_list,
               training,
               image_size,
               n_classes,
               scope_reuse=False,
               norm=tfnorm.batch_norm,
               **kwargs):
    """
    This is a U-NET like arch with skips before and after latent space and a rather simple decoder
    """

    resolution_levels = kwargs.get('resolution_levels', 5)
    # Spatial size of the feature map the latent code is reshaped into.
    final_kernel_size = [
        d // (2**(resolution_levels - 1)) for d in image_size[0:2]
    ]
    logging.info('@ likelihood final kernel size')
    logging.info(final_kernel_size)

    n0 = kwargs.get('n0', 32)

    with tf.variable_scope('likelihood') as scope:

        if scope_reuse:
            scope.reuse_variables()

        # Two dense layers lift the latent vector to a 4*n0-channel map.
        ups = layers.dense_layer(z_list[0],
                                 'z_ups_1',
                                 hidden_units=8 * n0,
                                 normalisation=norm,
                                 training=training)
        ups = layers.dense_layer(ups,
                                 'z_ups_2',
                                 hidden_units=n0 * 4 *
                                 np.prod(final_kernel_size),
                                 normalisation=norm,
                                 training=training)

        net = tf.reshape(
            ups,
            tf.stack([-1, final_kernel_size[0], final_kernel_size[1], 4 * n0]))

        # Transposed convs double the resolution back towards the image size.
        for lvl in reversed(range(resolution_levels - 2)):
            net = layers.transposed_conv2D(net,
                                           num_filters=n0 * (lvl // 2 + 1),
                                           name='deconv%d' % lvl,
                                           normalisation=norm,
                                           training=training)

        # Final upsampling conv produces per-class logits.
        logits = layers.transposed_conv2D(net,
                                          num_filters=n_classes,
                                          name='deconv_out_s',
                                          activation=tf.identity)

        return [logits]
Ejemplo n.º 4
0
def CAM_net2D(x, nlabels, training, scope_reuse=False):
    """2D classifier in class-activation-map style: conv stack, global
    average pooling, then a bias-free linear layer producing logits."""

    with tf.variable_scope('classifier') as scope:

        if scope_reuse:
            scope.reuse_variables()

        nf = 32

        net = layers.conv2D_layer_bn(x, 'conv1_1', num_filters=nf, training=training)
        net = layers.maxpool2D_layer(net)

        net = layers.conv2D_layer_bn(net, 'conv2_1', num_filters=nf * 2, training=training)
        net = layers.maxpool2D_layer(net)

        # Remaining convs keep the resolution; filters grow up to nf*16.
        for layer_name, mult in [('conv3_1', 4), ('conv3_2', 4),
                                 ('conv4_1', 8), ('conv4_2', 8),
                                 ('conv5_1', 16), ('conv5_2', 16),
                                 ('feature_maps', 16)]:
            net = layers.conv2D_layer_bn(net, layer_name, num_filters=nf * mult, training=training)

        # Global average pool over the final feature maps.
        pooled = layers.averagepool2D_layer(net, name='fm_averages')

        # Bias-free so the class weights map directly onto the feature maps.
        logits = layers.dense_layer(pooled, 'weight_layer', hidden_units=nlabels, activation=tf.identity, add_bias=False)

    return logits
Ejemplo n.º 5
0
def jia_xi_net_multitask_ordinal(images,
                                 training,
                                 nlabels,
                                 n_age_thresholds=5,
                                 scope_name='classifier',
                                 scope_reuse=False):
    """Multi-task 3D classifier: diagnosis logits plus ordinal age heads.

    A shared conv/pool trunk feeds a diagnosis head and `n_age_thresholds`
    binary heads (one 2-way classifier per ordinal age threshold).
    """
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        net = images
        # Per stage: (number of filters, number of conv layers before pooling).
        for stage, (n_filters, n_convs) in enumerate(
                [(32, 1), (64, 1), (128, 2), (256, 2)], start=1):
            for conv_idx in range(1, n_convs + 1):
                net = layers.conv3D_layer(net,
                                          'conv%d_%d' % (stage, conv_idx),
                                          num_filters=n_filters)
            net = layers.max_pool_layer3d(net)

        # Diagnosis head.
        dense1 = layers.dense_layer(net, 'dense1', hidden_units=512)
        diagnosis = layers.dense_layer(dense1,
                                       'dense2',
                                       hidden_units=nlabels,
                                       activation=tf.identity)

        # Ordinal age head: independent binary logits per threshold.
        dense_ages = layers.dense_layer(net, 'dense_ages', hidden_units=512)
        ages_logits = [
            layers.dense_layer(dense_ages,
                               'age_%s' % str(ii),
                               hidden_units=2,
                               activation=tf.identity)
            for ii in range(n_age_thresholds)
        ]

        return diagnosis, ages_logits
Ejemplo n.º 6
0
def only_conv_generator(z,
                        training,
                        residual=True,
                        batch_normalization=False,
                        hidden_layers=2,
                        filters=16,
                        input_noise_dim=0,
                        scope_name='generator',
                        scope_reuse=False):
    """Purely convolutional 3D generator.

    Optionally injects a learned noise channel into the input, applies
    `hidden_layers` conv layers with `filters` filters each, then a final
    1x1x1 conv down to one channel. With `residual=True` the input `z` is
    added to the output (only an end-to-end skip is possible).

    Args:
        z: input tensor, NDHWC; batch size must be static when
           input_noise_dim >= 1 (get_shape is used for the noise shape).
        training: flag forwarded to batch-norm layers.
        residual: add `z` to the output if True.
        batch_normalization: use conv3D_layer_bn for the hidden layers.
        hidden_layers: number of hidden conv layers.
        filters: filters per hidden conv layer (both branches).
        input_noise_dim: dimension of uniform noise to inject (0 = none).
        scope_name, scope_reuse: variable-scope handling.

    Returns:
        Single-channel generated tensor (plus `z` when residual).
    """
    # batch size 2: hidden_layers=2, filters=16
    # batch size 1: hidden_layers=3, filters=32
    # only residual connection from beginning to end possible
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        previous_layer = z
        if input_noise_dim >= 1:
            # create noise, push it through a fc layer and concatenate it as a new channel
            noise_in = tf.random_uniform(shape=[
                previous_layer.get_shape().as_list()[0], input_noise_dim
            ],
                                         minval=-1,
                                         maxval=1)
            # make sure the last dimension is 1 but the others agree with the image input
            noise_channel_shape = previous_layer.shape[:-1]
            # the batchsize stays constant
            fc_hidden_units = np.prod(noise_channel_shape[1:])
            fc_noise_layer = layers.dense_layer(noise_in,
                                                'fc_noise_layer',
                                                hidden_units=fc_hidden_units,
                                                activation=tf.identity)
            noise_channel = tf.reshape(fc_noise_layer, noise_channel_shape)
            noise_channel = tf.expand_dims(noise_channel, axis=-1)
            previous_layer = tf.concat([previous_layer, noise_channel],
                                       axis=-1)
        for depth in range(1, hidden_layers + 1):
            if batch_normalization:
                previous_layer = layers.conv3D_layer_bn(previous_layer,
                                                        'gconv%d' % depth,
                                                        training,
                                                        num_filters=filters,
                                                        activation=tf.nn.relu)
            else:
                # FIX: this branch hard-coded num_filters=32 and silently
                # ignored the `filters` parameter (the BN branch used it).
                previous_layer = layers.conv3D_layer(previous_layer,
                                                     'gconv%d' % depth,
                                                     num_filters=filters,
                                                     activation=tf.nn.relu)
        # 1x1x1 conv collapses the features to a single output channel.
        last_layer = layers.conv3D_layer(previous_layer,
                                         'gconv%d_last' % (hidden_layers + 1),
                                         num_filters=1,
                                         kernel_size=(1, 1, 1),
                                         strides=(1, 1, 1),
                                         activation=tf.identity)
        if residual:
            return last_layer + z
        else:
            return last_layer
Ejemplo n.º 7
0
def unet_T_L(x,
             s_oh,
             zdim_0,
             training,
             scope_reuse=False,
             norm=tfnorm.batch_norm,
             **kwargs):
    """Hierarchical posterior network over several latent resolution levels.

    Encodes the image `x` together with the centred one-hot segmentation
    `s_oh` into a feature pyramid, then samples latent variables from the
    coarsest level down to the finest, feeding upsampled coarser samples
    into finer levels (U-Net-like top-down dependencies).

    kwargs used: full_cov_list, n0, max_channel_power, latent_levels,
    resolution_levels, full_latent_dependencies.

    Returns:
        z, mu, sigma: lists of length `latent_levels`; sigma[i] is a
        Cholesky factor where full covariance is selected for level i,
        otherwise a per-pixel std map.
    """

    # POSTERIOR ####################

    with tf.variable_scope('posterior') as scope:

        if scope_reuse:
            scope.reuse_variables()

        # Per-level flags selecting a full-covariance Gaussian.
        # NOTE(review): defaults to None but full_cov_list[i] is indexed
        # below — callers apparently must always pass this kwarg; confirm.
        full_cov_list = kwargs.get('full_cov_list', None)

        n0 = kwargs.get('n0', 32)
        max_channel_power = kwargs.get('max_channel_power', 4)
        # NOTE(review): max_channels is computed but not used in this function.
        max_channels = n0 * 2**max_channel_power
        latent_levels = kwargs.get('latent_levels', 4)
        resolution_levels = kwargs.get('resolution_levels', 6)

        # Static spatial (H, W) of the input image.
        spatial_xdim = x.get_shape().as_list()[1:3]

        # If True, each latent level is conditioned on ALL coarser levels,
        # not just the immediately coarser one.
        full_latent_dependencies = kwargs.get('full_latent_dependencies',
                                              False)

        pre_z = [None] * resolution_levels

        mu = [None] * latent_levels
        sigma = [None] * latent_levels
        z = [None] * latent_levels

        z_ups_mat = []
        for i in range(latent_levels):
            z_ups_mat.append(
                [None] *
                latent_levels)  # encoding [original resolution][upsampled to]

        # Generate pre_z's
        for i in range(resolution_levels):

            if i == 0:
                # Finest level: image concatenated with centred one-hot labels.
                net = tf.concat([x, s_oh - 0.5], axis=-1)
            else:
                # Downsample the previous level's features.
                net = layers.reshape_pool2D_layer(pre_z[i - 1])

            net = layers.conv2D(net,
                                'z%d_pre_1' % i,
                                num_filters=n0 * (i // 2 + 1),
                                normalisation=norm,
                                training=training)
            net = layers.conv2D(net,
                                'z%d_pre_2' % i,
                                num_filters=n0 * (i // 2 + 1),
                                normalisation=norm,
                                training=training)

            pre_z[i] = net

        # Generate z's (coarsest latent level first)
        for i in reversed(range(latent_levels)):

            # Spatial size of this latent level's feature map.
            spatial_zdim = [
                d // 2**(i + resolution_levels - latent_levels)
                for d in spatial_xdim
            ]
            spatial_cov_dim = spatial_zdim[0] * spatial_zdim[1]

            if i == latent_levels - 1:
                # Coarsest level: no dependency on other latents.

                mu[i] = layers.conv2D(pre_z[i + resolution_levels -
                                            latent_levels],
                                      'z%d_mu' % i,
                                      num_filters=zdim_0,
                                      activation=tf.identity)

                if full_cov_list[i] == True:
                    # Full covariance: predict the lower-triangular Cholesky
                    # factor over all spatial positions, per latent channel.

                    l = layers.dense_layer(
                        pre_z[i + resolution_levels - latent_levels],
                        'z%d_sigma' % i,
                        hidden_units=zdim_0 * spatial_cov_dim *
                        (spatial_cov_dim + 1) // 2,
                        activation=tf.identity)
                    l = tf.reshape(l, [
                        -1, zdim_0, spatial_cov_dim *
                        (spatial_cov_dim + 1) // 2
                    ])
                    Lp = tf.contrib.distributions.fill_triangular(l)
                    L = tf.linalg.set_diag(
                        Lp, tf.nn.softplus(tf.linalg.diag_part(Lp))
                    )  # Cholesky factors must have positive diagonal

                    sigma[i] = L

                    # Correlated reparameterisation: flatten eps spatially,
                    # apply L per channel, reshape back: z = mu + L @ eps.
                    eps = tf.random_normal(tf.shape(mu[i]))
                    eps = tf.transpose(eps, perm=[0, 3, 1, 2])
                    bs = tf.shape(x)[0]
                    eps = tf.reshape(eps, tf.stack([bs, zdim_0, -1, 1]))

                    eps_tmp = tf.matmul(sigma[i], eps)
                    eps_tmp = tf.transpose(eps_tmp, perm=[0, 2, 3, 1])
                    eps_tmp = tf.reshape(
                        eps_tmp,
                        [bs, spatial_zdim[0], spatial_zdim[1], zdim_0])

                    z[i] = mu[i] + eps_tmp

                else:
                    # Diagonal Gaussian: per-pixel std via softplus.

                    sigma[i] = layers.conv2D(pre_z[i + resolution_levels -
                                                   latent_levels],
                                             'z%d_sigma' % i,
                                             num_filters=zdim_0,
                                             activation=tf.nn.softplus,
                                             kernel_size=(1, 1))
                    z[i] = mu[i] + sigma[i] * tf.random_normal(
                        tf.shape(mu[i]), 0, 1, dtype=tf.float32)

            else:
                # Finer levels: condition on upsampled coarser-level samples.

                # Upsample the level-(i+1) encodings one resolution step
                # towards each finer level j and cache them in z_ups_mat.
                for j in reversed(range(0, i + 1)):

                    z_below_ups = layers.nearest_neighbour_upsample2D(
                        z_ups_mat[j + 1][i + 1], factor=2)
                    z_below_ups = layers.conv2D(z_below_ups,
                                                name='z%d_ups_to_%d_c_1' %
                                                ((i + 1), (j + 1)),
                                                num_filters=zdim_0 * n0,
                                                normalisation=norm,
                                                training=training)
                    z_below_ups = layers.conv2D(z_below_ups,
                                                name='z%d_ups_to_%d_c_2' %
                                                ((i + 1), (j + 1)),
                                                num_filters=zdim_0 * n0,
                                                normalisation=norm,
                                                training=training)

                    z_ups_mat[j][i + 1] = z_below_ups

                if full_latent_dependencies:
                    # Concatenate features with ALL coarser-level encodings.
                    z_input = tf.concat(
                        [pre_z[i + resolution_levels - latent_levels]] +
                        z_ups_mat[i][(i + 1):latent_levels],
                        axis=3,
                        name='concat_%d' % i)
                else:
                    # Concatenate features with the next-coarser encoding only.
                    z_input = tf.concat([
                        pre_z[i + resolution_levels - latent_levels],
                        z_ups_mat[i][i + 1]
                    ],
                                        axis=3,
                                        name='concat_%d' % i)

                z_input = layers.conv2D(z_input,
                                        'z%d_input_1' % i,
                                        num_filters=n0 * (i // 2 + 1),
                                        normalisation=norm,
                                        training=training)
                z_input = layers.conv2D(z_input,
                                        'z%d_input_2' % i,
                                        num_filters=n0 * (i // 2 + 1),
                                        normalisation=norm,
                                        training=training)

                mu[i] = layers.conv2D(z_input,
                                      'z%d_mu' % i,
                                      num_filters=zdim_0,
                                      activation=tf.identity,
                                      kernel_size=(1, 1))

                if full_cov_list[i] == True:
                    # Full-covariance branch (same scheme as coarsest level).

                    l = layers.dense_layer(z_input,
                                           'z%d_sigma' % i,
                                           hidden_units=zdim_0 *
                                           spatial_cov_dim *
                                           (spatial_cov_dim + 1) // 2,
                                           activation=tf.identity)
                    l = tf.reshape(l, [
                        -1, zdim_0, spatial_cov_dim *
                        (spatial_cov_dim + 1) // 2
                    ])
                    Lp = tf.contrib.distributions.fill_triangular(l)
                    L = tf.linalg.set_diag(
                        Lp, tf.nn.softplus(tf.linalg.diag_part(Lp)))

                    sigma[i] = L

                    eps = tf.random_normal(tf.shape(mu[i]))
                    eps = tf.transpose(eps, perm=[0, 3, 1, 2])
                    bs = tf.shape(x)[0]
                    eps = tf.reshape(eps, tf.stack([bs, zdim_0, -1, 1]))

                    eps_tmp = tf.matmul(sigma[i], eps)
                    eps_tmp = tf.transpose(eps_tmp, perm=[0, 2, 3, 1])
                    eps_tmp = tf.reshape(
                        eps_tmp,
                        [bs, spatial_zdim[0], spatial_zdim[1], zdim_0])

                    z[i] = mu[i] + eps_tmp

                else:
                    # Diagonal Gaussian branch.

                    sigma[i] = layers.conv2D(z_input,
                                             'z%d_sigma' % i,
                                             num_filters=zdim_0,
                                             activation=tf.nn.softplus,
                                             kernel_size=(1, 1))
                    z[i] = mu[i] + sigma[i] * tf.random_normal(
                        tf.shape(mu[i]), 0, 1, dtype=tf.float32)

            # A level's own sample sits on the diagonal of the matrix.
            z_ups_mat[i][i] = z[i]

    return z, mu, sigma
Ejemplo n.º 8
0
def segvae_const_latent(x,
                        s_oh,
                        zdim_0,
                        training,
                        scope_reuse=False,
                        norm=tfnorm.batch_norm,
                        **kwargs):
    """Hierarchical posterior where all latent levels share one spatial size.

    Builds a conv/pool encoder pyramid from the image `x` and centred
    one-hot segmentation `s_oh`, reduces every level's features down to the
    coarsest resolution, and samples one latent per level (coarsest first),
    each finer level conditioned on the coarser samples.

    kwargs used: n0, max_channel_power, full_cov_list, resolution_levels,
    full_latent_dependencies.

    Returns:
        z, mu, sigma: lists of length `resolution_levels`; sigma[i] is a
        Cholesky factor where full covariance is selected, otherwise a
        per-pixel std map.
    """

    n0 = kwargs.get('n0', 32)
    max_channel_power = kwargs.get('max_channel_power', 4)
    # Upper bound on the channel count used inside reduce_resolution.
    max_channels = n0 * 2**max_channel_power
    # NOTE(review): defaults to None but full_cov_list[i] is indexed below —
    # callers apparently must always pass this kwarg; confirm.
    full_cov_list = kwargs.get('full_cov_list', None)

    resolution_levels = kwargs.get('resolution_levels', 5)

    def reduce_resolution(x, times, name):
        # Halve the spatial resolution `times` times via reshape-pooling,
        # following each step with a conv that caps the channel count.

        with tf.variable_scope(name):

            nett = x

            for ii in range(times):

                nett = layers.reshape_pool2D_layer(nett)
                nC = nett.get_shape().as_list()[3]
                nett = layers.conv2D(nett,
                                     'down_%d' % ii,
                                     num_filters=min(nC // 4, max_channels),
                                     normalisation=norm,
                                     training=training)

        return nett

    with tf.variable_scope('posterior') as scope:

        # All latents live at the coarsest spatial resolution.
        spatial_xdim = x.get_shape().as_list()[1:3]
        spatial_zdim = [d // 2**(resolution_levels - 1) for d in spatial_xdim]
        spatial_cov_dim = spatial_zdim[0] * spatial_zdim[1]

        if scope_reuse:
            scope.reuse_variables()

        n0 = kwargs.get('n0', 32)
        levels = resolution_levels

        # If True, each level is conditioned on ALL coarser samples.
        full_latent_dependencies = kwargs.get('full_latent_dependencies',
                                              False)

        pre_z = [None] * levels
        mu = [None] * levels
        sigma = [None] * levels
        z = [None] * levels

        z_mat = []
        for i in range(levels):
            z_mat.append(
                [None] *
                levels)  # encoding [original resolution][upsampled to]

        # Generate pre_z's
        for i in range(levels):

            if i == 0:
                # Finest level: image concatenated with centred one-hot labels.
                net = tf.concat([x, s_oh - 0.5], axis=-1)
            else:
                net = layers.maxpool2D(pre_z[i - 1])

            net = layers.conv2D(net,
                                'z%d_pre_1' % i,
                                num_filters=n0 * (i // 2 + 1),
                                normalisation=norm,
                                training=training)
            pre_z[i] = net

        # Generate z's (coarsest level first)
        for i in reversed(range(levels)):

            # Bring this level's features down to the shared latent resolution.
            z_input = reduce_resolution(pre_z[i],
                                        levels - i - 1,
                                        name='reduction_%d' % i)
            logging.info('z_input.shape')
            logging.info(z_input.get_shape().as_list())

            if i == levels - 1:
                # Coarsest level: no dependency on other latents.

                mu[i] = layers.conv2D(z_input,
                                      'z%d_mu' % i,
                                      num_filters=zdim_0,
                                      activation=tf.identity)

                if full_cov_list[i] == True:
                    # Full covariance: predict the lower-triangular Cholesky
                    # factor over all spatial positions, per latent channel.

                    l = layers.dense_layer(z_input,
                                           'z%d_sigma' % i,
                                           hidden_units=zdim_0 *
                                           spatial_cov_dim *
                                           (spatial_cov_dim + 1) // 2,
                                           activation=tf.identity)
                    l = tf.reshape(l, [
                        -1, zdim_0, spatial_cov_dim *
                        (spatial_cov_dim + 1) // 2
                    ])
                    Lp = tf.contrib.distributions.fill_triangular(l)
                    L = tf.linalg.set_diag(
                        Lp, tf.nn.softplus(tf.linalg.diag_part(Lp))
                    )  # Cholesky factors must have positive diagonal

                    logging.info('L%d.shape ==========' % i)
                    logging.info(L.get_shape().as_list())

                    sigma[i] = L

                    # Correlated reparameterisation: flatten eps spatially,
                    # apply L per channel, reshape back: z = mu + L @ eps.
                    eps = tf.random_normal(tf.shape(mu[i]))
                    eps = tf.transpose(eps, perm=[0, 3, 1, 2])
                    bs = tf.shape(x)[0]
                    eps = tf.reshape(eps, tf.stack([bs, zdim_0, -1, 1]))

                    eps_tmp = tf.matmul(sigma[i], eps)
                    eps_tmp = tf.transpose(eps_tmp, perm=[0, 2, 3, 1])
                    eps_tmp = tf.reshape(
                        eps_tmp,
                        [bs, spatial_zdim[0], spatial_zdim[1], zdim_0])

                    z[i] = mu[i] + eps_tmp

                else:
                    # Diagonal Gaussian: per-pixel std via softplus.

                    sigma[i] = layers.conv2D(z_input,
                                             'z%d_sigma' % i,
                                             num_filters=zdim_0,
                                             activation=tf.nn.softplus,
                                             kernel_size=(1, 1))
                    z[i] = mu[i] + sigma[i] * tf.random_normal(
                        tf.shape(mu[i]), 0, 1, dtype=tf.float32)

            else:
                # Finer levels: condition on coarser samples. All levels share
                # one resolution, so only channel-expanding convs are needed.

                for j in reversed(range(0, i + 1)):
                    z_connect = layers.conv2D(z_mat[j + 1][i + 1],
                                              name='double_res_%d_to_%d' %
                                              ((i + 1), (j)),
                                              num_filters=2 * zdim_0,
                                              normalisation=norm,
                                              training=training)
                    z_mat[j][i + 1] = z_connect

                if full_latent_dependencies:
                    # Concatenate with ALL coarser-level encodings.
                    z_input = tf.concat([z_input] + z_mat[i][(i + 1):levels],
                                        axis=3,
                                        name='concat_%d' % i)
                else:
                    # Concatenate with the next-coarser encoding only.
                    z_input = tf.concat([z_input, z_mat[i][(i + 1)]],
                                        axis=3,
                                        name='concat_%d' % i)

                mu[i] = layers.conv2D(z_input,
                                      'z%d_mu' % i,
                                      num_filters=zdim_0,
                                      activation=tf.identity)

                if full_cov_list[i] == True:
                    # Full-covariance branch (same scheme as coarsest level).
                    l = layers.dense_layer(z_input,
                                           'z%d_sigma' % i,
                                           hidden_units=zdim_0 *
                                           spatial_cov_dim *
                                           (spatial_cov_dim + 1) // 2,
                                           activation=tf.identity)
                    l = tf.reshape(l, [
                        -1, zdim_0, spatial_cov_dim *
                        (spatial_cov_dim + 1) // 2
                    ])
                    Lp = tf.contrib.distributions.fill_triangular(l)
                    L = tf.linalg.set_diag(
                        Lp, tf.nn.softplus(tf.linalg.diag_part(Lp)))

                    sigma[i] = L

                    eps = tf.random_normal(tf.shape(mu[i]))
                    eps = tf.transpose(eps, perm=[0, 3, 1, 2])
                    bs = tf.shape(x)[0]
                    eps = tf.reshape(eps, tf.stack([bs, zdim_0, -1, 1]))

                    eps_tmp = tf.matmul(sigma[i], eps)
                    eps_tmp = tf.transpose(eps_tmp, perm=[0, 2, 3, 1])
                    eps_tmp = tf.reshape(
                        eps_tmp,
                        [bs, spatial_zdim[0], spatial_zdim[1], zdim_0])

                    z[i] = mu[i] + eps_tmp

                else:
                    # Diagonal Gaussian branch.
                    # NOTE(review): this conv has no kernel_size=(1, 1) unlike
                    # the other sigma convs — confirm intentional.

                    sigma[i] = layers.conv2D(z_input,
                                             'z%d_sigma' % i,
                                             num_filters=zdim_0,
                                             activation=tf.nn.softplus)
                    z[i] = mu[i] + sigma[i] * tf.random_normal(
                        tf.shape(mu[i]), 0, 1, dtype=tf.float32)

            # Store this level's own sample on the diagonal.
            z_mat[i][i] = z[i]

    return z, mu, sigma
Ejemplo n.º 9
0
def pool_fc_discriminator_bs2(x,
                              training,
                              scope_name='discriminator',
                              scope_reuse=False):
    """3D conv discriminator: six conv(+pool) stages, then two dense layers."""

    def _conv(inp, layer_name, n_filters):
        # All plain convs share kernel/stride settings and leaky-ReLU.
        return layers.conv3D_layer(inp,
                                   layer_name,
                                   kernel_size=(3, 3, 3),
                                   num_filters=n_filters,
                                   strides=(1, 1, 1),
                                   activation=layers.leaky_relu)

    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        net = _conv(x, 'dconv1_1', 8)
        net = layers.max_pool_layer3d(net)

        net = _conv(net, 'dconv2_1', 16)
        net = layers.max_pool_layer3d(net)

        net = _conv(net, 'dconv3_1', 16)
        # dconv3_2 is the single batch-normalised conv in this network.
        net = layers.conv3D_layer_bn(net,
                                     'dconv3_2',
                                     kernel_size=(3, 3, 3),
                                     num_filters=16,
                                     strides=(1, 1, 1),
                                     activation=layers.leaky_relu,
                                     training=training)
        net = layers.max_pool_layer3d(net)

        # Stages 4-6: two plain 16-filter convs each, followed by pooling.
        for stage in (4, 5, 6):
            net = _conv(net, 'dconv%d_1' % stage, 16)
            net = _conv(net, 'dconv%d_2' % stage, 16)
            net = layers.max_pool_layer3d(net)

        net = layers.dense_layer(net,
                                 'ddense1',
                                 hidden_units=256,
                                 activation=layers.leaky_relu)

        # Single real/fake logit.
        return layers.dense_layer(net,
                                  'ddense2',
                                  hidden_units=1,
                                  activation=tf.identity)
Ejemplo n.º 10
0
def bousmalis_discriminator(x,
                            training,
                            batch_normalization,
                            middle_layers,
                            initial_filters,
                            dropout_start=3,
                            scope_name='discriminator',
                            scope_reuse=False):
    """Bousmalis-style 3D convolutional discriminator.

    Stacks one stride-1 conv, then `middle_layers` stride-2 convs whose
    filter count doubles at every level, applies dropout from layer
    `dropout_start` onwards, and ends in a single-unit linear dense layer.

    Args:
        x: Input tensor fed to the first 3D convolution.
        training: Training-phase flag passed to batch norm and dropout.
        batch_normalization: If True, use the batch-normalised conv variant.
        middle_layers: Number of downsampling (stride-2) conv layers.
        initial_filters: Filter count of the first conv; doubled per level.
        dropout_start: First layer index (counting from 2) at which dropout
            is inserted after the conv.
        scope_name: Variable scope wrapping all discriminator variables.
        scope_reuse: If True, reuse variables in `scope_name`.

    Returns:
        The un-activated scalar logit tensor (`dense_out`).
    """
    # Leaky-relu slope matches the parameter used in the paper.
    leaky_relu = lambda t: layers.leaky_relu(t, alpha=0.2)

    def _conv(bottom, name, num_filters, strides):
        # One 3x3x3 conv, optionally batch-normalised.
        if batch_normalization:
            return layers.conv3D_layer_bn(bottom,
                                          name,
                                          kernel_size=(3, 3, 3),
                                          num_filters=num_filters,
                                          strides=strides,
                                          activation=leaky_relu,
                                          training=training)
        return layers.conv3D_layer(bottom,
                                   name,
                                   kernel_size=(3, 3, 3),
                                   num_filters=num_filters,
                                   strides=strides,
                                   activation=leaky_relu)

    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        net = _conv(x, 'convs1_1', initial_filters, (1, 1, 1))

        for layer_idx in range(2, 2 + middle_layers):
            # Filter count doubles at every downsampling level.
            net = _conv(net,
                        'convs2_' + str(layer_idx),
                        initial_filters * (2**(layer_idx - 1)),
                        (2, 2, 2))
            if layer_idx >= dropout_start:
                net = layers.dropout_layer(net,
                                           'dropout_' + str(layer_idx),
                                           training,
                                           keep_prob=0.9)

        dense_out = layers.dense_layer(net,
                                       'dense_out',
                                       hidden_units=1,
                                       activation=tf.identity)

    return dense_out
# Ejemplo n.º 11
# 0
def bousmalis_generator(x,
                        z_noise,
                        training,
                        batch_normalization,
                        residual_blocks,
                        nfilters,
                        last_activation=tf.nn.tanh,
                        scope_name='generator',
                        scope_reuse=False):
    """Bousmalis-style 3D residual generator.

    Optionally concatenates a learned noise channel (projected from
    `z_noise`) to the input, applies one stride-1 conv, a stack of
    residual blocks, and a final 1-filter conv with `last_activation`.

    Args:
        x: Input image tensor, shape (batch, d, h, w, channels); the
            spatial dims d/h/w must be statically known when `z_noise`
            is used.
        z_noise: Optional flat noise vector; if None, no noise channel
            is added.
        training: Training-phase flag passed to batch norm.
        batch_normalization: If True, residual-block convs use batch norm.
        residual_blocks: Number of residual blocks to stack.
        nfilters: Filter count used throughout the trunk.
        last_activation: Activation of the output conv (default tanh).
        scope_name: Variable scope wrapping all generator variables.
        scope_reuse: If True, reuse variables in `scope_name`.

    Returns:
        The generated single-channel output tensor (`conv_out`).
    """
    kernel_size = (3, 3, 3)
    strides = (1, 1, 1)
    # Conv factory used inside the residual blocks.
    if batch_normalization:
        conv_layer = lambda bottom, name, activation: layers.conv3D_layer_bn(
            bottom,
            name,
            training=training,
            kernel_size=kernel_size,
            num_filters=nfilters,
            strides=strides,
            activation=activation)
    else:
        conv_layer = lambda bottom, name, activation: layers.conv3D_layer(
            bottom,
            name,
            kernel_size=kernel_size,
            num_filters=nfilters,
            strides=strides,
            activation=activation)
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        x_conv_in = x
        if z_noise is not None:
            # Project the noise vector to one value per spatial location,
            # then append it to x as an extra channel.
            # BUGFIX: the original reshaped to x.shape[:-1] (a TensorShape
            # including the batch dim), which fails at graph construction
            # when the batch size is unknown (None), and fed a np.prod of
            # tf Dimension objects as hidden_units. Use the static spatial
            # dims and -1 for the batch dim instead.
            spatial_shape = x.shape.as_list()[1:-1]
            fc_hidden_units = int(np.prod(spatial_shape))
            fc_noise_layer = layers.dense_layer(z_noise,
                                                'fc_noise_layer',
                                                hidden_units=fc_hidden_units,
                                                activation=tf.identity)
            # -1 keeps the (possibly unknown) batch size.
            noise_channel = tf.reshape(fc_noise_layer, [-1] + spatial_shape)
            noise_channel = tf.expand_dims(noise_channel, axis=-1)
            x_conv_in = tf.concat([x, noise_channel], axis=-1)
        previous_layer = layers.conv3D_layer(x_conv_in,
                                             'conv1',
                                             kernel_size=kernel_size,
                                             num_filters=nfilters,
                                             strides=strides,
                                             activation=tf.nn.relu)

        # Place residual blocks.
        for block_num in range(1, 1 + residual_blocks):
            previous_layer = layers.residual_block_original(
                previous_layer,
                'res_block_' + str(block_num),
                conv_layer,
                activation=tf.nn.relu,
                nlayers=2)

        conv_out = layers.conv3D_layer(previous_layer,
                                       'conv_out',
                                       kernel_size=kernel_size,
                                       num_filters=1,
                                       strides=strides,
                                       activation=last_activation)
        return conv_out
def C3D_32_bn(x,
              nlabels,
              training,
              n0=32,
              norm=tfnorm.batch_norm,
              scope_reuse=False):
    """C3D-style 3D conv classifier with normalisation.

    Four conv stages (filters n0, 2*n0, 4*n0, 8*n0; the last two stages
    have two convs each), each followed by max pooling, then two dense
    layers producing un-activated class logits.

    Args:
        x: Input volume tensor.
        nlabels: Number of output classes (logit units).
        training: Training-phase flag passed to the normalisation layers.
        n0: Base filter count (default 32).
        norm: Normalisation function applied after convs/dense layers.
        scope_reuse: If True, reuse variables in the 'classifier' scope.

    Returns:
        The un-activated logits tensor (`diag_logits`).
    """
    with tf.variable_scope('classifier') as scope:

        if scope_reuse:
            scope.reuse_variables()

        # A bias term is redundant when batch norm follows the layer.
        use_bias = norm is not tfnorm.batch_norm

        def _conv(bottom, name, filters):
            # Normalised 3D conv with the shared settings of this network.
            return layers.conv3D(bottom,
                                 name,
                                 num_filters=filters,
                                 training=training,
                                 normalisation=norm,
                                 add_bias=use_bias)

        net = _conv(x, 'conv1_1', n0)
        net = layers.maxpool3D(net)

        net = _conv(net, 'conv2_1', n0 * 2)
        net = layers.maxpool3D(net)

        net = _conv(net, 'conv3_1', n0 * 4)
        net = _conv(net, 'conv3_2', n0 * 4)
        net = layers.maxpool3D(net)

        net = _conv(net, 'conv4_1', n0 * 8)
        net = _conv(net, 'conv4_2', n0 * 8)
        net = layers.maxpool3D(net)

        net = layers.dense_layer(net,
                                 'dense1',
                                 hidden_units=n0 * 16,
                                 training=training,
                                 normalisation=norm,
                                 add_bias=use_bias)
        diag_logits = layers.dense_layer(net,
                                         'diag_logits',
                                         hidden_units=nlabels,
                                         activation=tf.identity,
                                         training=training,
                                         normalisation=norm,
                                         add_bias=use_bias)

    return diag_logits