Example #1
def C3D_fcn_16_conditioned(x,
                           training,
                           scope_name='critic',
                           scope_reuse=False):

    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        # Split the two input channels: channel 1 is the conditioning volume,
        # channel 0 is the image the critic scores.
        condition = x[:, :, :, :, 1:2]
        x = x[:, :, :, :, 0:1]

        conv1_1 = layers.conv3D_layer(x, 'conv1_1', num_filters=16)

        pool1 = layers.maxpool3D_layer(conv1_1)

        conv2_1 = layers.conv3D_layer(pool1, 'conv2_1', num_filters=32)

        pool2 = layers.maxpool3D_layer(conv2_1)

        conv3_1 = layers.conv3D_layer(pool2, 'conv3_1', num_filters=64)
        conv3_2 = layers.conv3D_layer(conv3_1, 'conv3_2', num_filters=64)

        pool3 = layers.maxpool3D_layer(conv3_2)

        conv4_1 = layers.conv3D_layer(pool3, 'conv4_1', num_filters=128)
        conv4_2 = layers.conv3D_layer(conv4_1, 'conv4_2', num_filters=128)

        pool4 = layers.maxpool3D_layer(conv4_2)

        conv5_1 = layers.conv3D_layer(pool4, 'conv5_1', num_filters=256)
        conv5_2 = layers.conv3D_layer(conv5_1, 'conv5_2', num_filters=256)

        convD_1 = layers.conv3D_layer(conv5_2, 'convD_1', num_filters=256)
        convD_2 = layers.conv3D_layer(convD_1,
                                      'convD_2',
                                      num_filters=1,
                                      kernel_size=(1, 1, 1),
                                      activation=tf.identity)
        logits = layers.averagepool3D_layer(convD_2, name='diagnosis_avg')

        condition_logits = C3D_fcn_16(condition,
                                      training,
                                      scope_name="critic_condition",
                                      scope_reuse=scope_reuse)

        all_logits = tf.concat([logits, condition_logits], axis=-1)
        decision = tf.layers.dense(all_logits, 1, reuse=scope_reuse)

    return decision
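A minimal usage sketch for the conditioned critic above, assuming TF 1.x and two-channel volumes of shape [batch, 64, 64, 64, 2] (channel 0: image, channel 1: condition). The placeholder names and the 64³ size are illustrative assumptions, not taken from the original repository.

import tensorflow as tf

x_real_pl = tf.placeholder(tf.float32, [None, 64, 64, 64, 2], name='x_real')
x_fake_pl = tf.placeholder(tf.float32, [None, 64, 64, 64, 2], name='x_fake')
training_pl = tf.placeholder(tf.bool, name='training')

# The first call builds the critic variables ...
d_real = C3D_fcn_16_conditioned(x_real_pl, training_pl, scope_name='critic')
# ... the second call reuses them, so both inputs are scored by the same network.
d_fake = C3D_fcn_16_conditioned(x_fake_pl, training_pl, scope_name='critic',
                                scope_reuse=True)
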
def C3D_fcn_16_body(x,
                    training,
                    scope_name='classifier',
                    scope_reuse=False,
                    **kwargs):
    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        conv1_1 = layers.conv3D_layer(x, 'conv1_1', num_filters=16)

        pool1 = layers.maxpool3D_layer(conv1_1)

        conv2_1 = layers.conv3D_layer(pool1, 'conv2_1', num_filters=32)

        pool2 = layers.maxpool3D_layer(conv2_1)

        conv3_1 = layers.conv3D_layer(pool2, 'conv3_1', num_filters=64)
        conv3_2 = layers.conv3D_layer(conv3_1, 'conv3_2', num_filters=64)

        pool3 = layers.maxpool3D_layer(conv3_2)

        conv4_1 = layers.conv3D_layer(pool3, 'conv4_1', num_filters=128)
        conv4_2 = layers.conv3D_layer(conv4_1, 'conv4_2', num_filters=128)

        pool4 = layers.maxpool3D_layer(conv4_2)

        conv5_1 = layers.conv3D_layer(pool4, 'conv5_1', num_filters=256)
        conv5_2 = layers.conv3D_layer(conv5_1, 'conv5_2', num_filters=256)

        n_channels = 256
        if "params" in kwargs:
            if "hidden_dim" in kwargs["params"]:
                n_channels = kwargs["params"]["hidden_dim"]
        convD_1 = layers.conv3D_layer(conv5_2,
                                      'convD_1',
                                      num_filters=n_channels)

        logits = tf.reduce_mean(convD_1, axis=(1, 2, 3))

    return logits
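A minimal sketch of how the optional params kwarg changes the width of the final feature layer of C3D_fcn_16_body. The input shape and the hidden_dim value below are illustrative assumptions.

import tensorflow as tf

x_pl = tf.placeholder(tf.float32, [None, 64, 64, 64, 1], name='x')
training_pl = tf.placeholder(tf.bool, name='training')

# Default: convD_1 has 256 filters, so the pooled feature vector is 256-D.
feat_256 = C3D_fcn_16_body(x_pl, training_pl, scope_name='classifier')

# params["hidden_dim"] overrides the number of filters in convD_1.
feat_128 = C3D_fcn_16_body(x_pl, training_pl, scope_name='classifier_128',
                           params={'hidden_dim': 128})
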
Example #3
def C3D_fcn_16(x, training, scope_name='critic', scope_reuse=False):

    with tf.variable_scope(scope_name) as scope:
        if scope_reuse:
            scope.reuse_variables()

        conv1_1 = layers.conv3D_layer(x, 'conv1_1', num_filters=16)

        pool1 = layers.maxpool3D_layer(conv1_1)

        conv2_1 = layers.conv3D_layer(pool1, 'conv2_1', num_filters=32)

        pool2 = layers.maxpool3D_layer(conv2_1)

        conv3_1 = layers.conv3D_layer(pool2, 'conv3_1', num_filters=64)
        conv3_2 = layers.conv3D_layer(conv3_1, 'conv3_2', num_filters=64)

        pool3 = layers.maxpool3D_layer(conv3_2)

        conv4_1 = layers.conv3D_layer(pool3, 'conv4_1', num_filters=128)
        conv4_2 = layers.conv3D_layer(conv4_1, 'conv4_2', num_filters=128)

        pool4 = layers.maxpool3D_layer(conv4_2)

        conv5_1 = layers.conv3D_layer(pool4, 'conv5_1', num_filters=256)
        conv5_2 = layers.conv3D_layer(conv5_1, 'conv5_2', num_filters=256)

        convD_1 = layers.conv3D_layer(conv5_2, 'convD_1', num_filters=256)
        convD_2 = layers.conv3D_layer(convD_1,
                                      'convD_2',
                                      num_filters=1,
                                      kernel_size=(1, 1, 1),
                                      activation=tf.identity)

        logits = layers.averagepool3D_layer(convD_2, name='diagnosis_avg')

    return logits
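A minimal sketch of calling the plain critic on single-channel volumes. The 32³ input size is an illustrative assumption, and the exact output shape depends on the repository's averagepool3D_layer.

import tensorflow as tf

vol_pl = tf.placeholder(tf.float32, [None, 32, 32, 32, 1], name='volume')
training_pl = tf.placeholder(tf.bool, name='training')

score = C3D_fcn_16(vol_pl, training_pl, scope_name='critic')
print(score.get_shape().as_list())  # one pooled logit per input volume
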
    def construct_graph(self):
        features = self.features
        params = self.params

        x = features[self.input_key]
        n_filters = params["n_filters"]
        filter_sizes = params["filter_sizes"]

        if params["normalize_images"]:
            x = self.normalize_voxels(x)

        input_shape = params["input_shape"]
        # Reshape to have one explicit channel
        x = tf.reshape(x,
                       [-1, input_shape[0], input_shape[1], input_shape[2], 1])

        self.x = x
        current_input = x

        n_ch_0 = 8
        self.n_ch_0 = n_ch_0
        conv1_1 = layers.conv3D_layer(x, 'conv1_1', num_filters=n_ch_0)
        # conv1_2 = layers.conv3D_layer(conv1_1, 'conv1_2', num_filters=n_ch_0)
        pool1 = layers.maxpool3D_layer(conv1_1)

        conv2_1 = layers.conv3D_layer(pool1, 'conv2_1', num_filters=n_ch_0 * 2)
        # conv2_2 = layers.conv3D_layer(conv2_1, 'conv2_2', num_filters=n_ch_0*2)
        pool2 = layers.maxpool3D_layer(conv2_1)

        conv3_1 = layers.conv3D_layer(pool2, 'conv3_1', num_filters=n_ch_0 * 4)
        # conv3_2 = layers.conv3D_layer(conv3_1, 'conv3_2', num_filters=n_ch_0*4)
        # pool3 = layers.maxpool3D_layer(conv3_2)

        # conv4_1 = layers.conv3D_layer(pool3, 'conv4_1', num_filters=n_ch_0*8)
        # conv4_2 = layers.conv3D_layer(conv4_1, 'conv4_2', num_filters=n_ch_0*8)

        self.z = tf.contrib.layers.flatten(conv3_1)
        current_input = self.z

        dim_list = current_input.get_shape().as_list()[1:]
        # Product of the remaining dimensions (reduce is functools.reduce in Python 3).
        cur_dim = reduce(lambda x, y: x * y, dim_list)
        self.linear_trafo = False
        if cur_dim > self.get_encoding_dim():
            print("Non conv layer needed")
            self.linear_trafo = True
            self.dim_before_linear_trafo = cur_dim
            self.dim_list = dim_list
            current_input = tf.contrib.layers.flatten(current_input)
            W = tf.get_variable(
                "non_conv_w",
                shape=[cur_dim, self.get_encoding_dim()],
                initializer=tf.contrib.layers.xavier_initializer(seed=40))
            b = tf.get_variable("non_conv_b",
                                shape=[1, self.get_encoding_dim()],
                                initializer=tf.initializers.zeros)

            self.linear_w = W

            # Note: the ReLU is applied to the matmul output before the bias is added.
            current_input = tf.add(tf.nn.relu(tf.matmul(current_input, W)), b)

            self.z = current_input

        self.conv1_1 = conv1_1
        # self.conv1_2 = conv1_2
        self.pool1 = pool1
        self.conv2_1 = conv2_1
        # self.conv2_2 = conv2_2
        self.pool2 = pool2
        self.conv3_1 = conv3_1
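construct_graph above is a method of a larger encoder class: it relies on self.features, self.params, self.get_encoding_dim(), and self.normalize_voxels(), which are not shown here. A minimal sketch of the params dictionary the method reads; all concrete values below are illustrative assumptions, and n_filters / filter_sizes are read by the method but unused in the excerpt shown.

params = {
    "input_shape": [64, 64, 64],   # voxel grid reshaped to [-1, D, H, W, 1]
    "normalize_images": True,      # enables self.normalize_voxels(x)
    "n_filters": [8, 16, 32],      # read by the method, unused in this excerpt
    "filter_sizes": [3, 3, 3],     # read by the method, unused in this excerpt
}
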
def unet_16_bn(x, training, scope_name='generator'):

    n_ch_0 = 16

    with tf.variable_scope(scope_name):

        conv1_1 = layers.conv3D_layer_bn(x,
                                         'conv1_1',
                                         num_filters=n_ch_0,
                                         training=training)
        conv1_2 = layers.conv3D_layer_bn(conv1_1,
                                         'conv1_2',
                                         num_filters=n_ch_0,
                                         training=training)
        pool1 = layers.maxpool3D_layer(conv1_2)

        conv2_1 = layers.conv3D_layer_bn(pool1,
                                         'conv2_1',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        conv2_2 = layers.conv3D_layer_bn(conv2_1,
                                         'conv2_2',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        pool2 = layers.maxpool3D_layer(conv2_2)

        conv3_1 = layers.conv3D_layer_bn(pool2,
                                         'conv3_1',
                                         num_filters=n_ch_0 * 4,
                                         training=training)
        conv3_2 = layers.conv3D_layer_bn(conv3_1,
                                         'conv3_2',
                                         num_filters=n_ch_0 * 4,
                                         training=training)
        pool3 = layers.maxpool3D_layer(conv3_2)

        conv4_1 = layers.conv3D_layer_bn(pool3,
                                         'conv4_1',
                                         num_filters=n_ch_0 * 8,
                                         training=training)
        conv4_2 = layers.conv3D_layer_bn(conv4_1,
                                         'conv4_2',
                                         num_filters=n_ch_0 * 8,
                                         training=training)

        upconv3 = layers.deconv3D_layer_bn(conv4_2,
                                           name='upconv3',
                                           num_filters=n_ch_0,
                                           training=training)
        concat3 = layers.crop_and_concat_layer_fixed([upconv3, conv3_2],
                                                     axis=-1)

        conv5_1 = layers.conv3D_layer_bn(concat3,
                                         'conv5_1',
                                         num_filters=n_ch_0 * 4,
                                         training=training)

        conv5_2 = layers.conv3D_layer_bn(conv5_1,
                                         'conv5_2',
                                         num_filters=n_ch_0 * 4,
                                         training=training)

        upconv2 = layers.deconv3D_layer_bn(conv5_2,
                                           name='upconv2',
                                           num_filters=n_ch_0,
                                           training=training)
        concat2 = layers.crop_and_concat_layer_fixed([upconv2, conv2_2],
                                                     axis=-1)

        conv6_1 = layers.conv3D_layer_bn(concat2,
                                         'conv6_1',
                                         num_filters=n_ch_0 * 2,
                                         training=training)
        conv6_2 = layers.conv3D_layer_bn(conv6_1,
                                         'conv6_2',
                                         num_filters=n_ch_0 * 2,
                                         training=training)

        upconv1 = layers.deconv3D_layer_bn(conv6_2,
                                           name='upconv1',
                                           num_filters=n_ch_0,
                                           training=training)

        concat1 = layers.crop_and_concat_layer_fixed([upconv1, conv1_2],
                                                     axis=-1)
        #concat1 = upconv1

        conv8_1 = layers.conv3D_layer_bn(concat1,
                                         'conv8_1',
                                         num_filters=n_ch_0,
                                         training=training)
        conv8_2 = layers.conv3D_layer(conv8_1,
                                      'conv8_2',
                                      num_filters=1,
                                      activation=tf.identity)

    return conv8_2
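A minimal TF 1.x training sketch for the batch-normalized U-Net generator above. The input shape, the reconstruction loss, and the optimizer settings are illustrative assumptions, and the output is assumed to match the input shape (i.e. 'same'-padded convolutions). If the conv3D_layer_bn / deconv3D_layer_bn blocks wrap tf.layers.batch_normalization, as the training flag suggests, the moving-average update ops should be grouped with the train step.

import tensorflow as tf

x_pl = tf.placeholder(tf.float32, [None, 64, 64, 64, 1], name='x')
y_pl = tf.placeholder(tf.float32, [None, 64, 64, 64, 1], name='y')
training_pl = tf.placeholder(tf.bool, name='training')

y_gen = unet_16_bn(x_pl, training_pl, scope_name='generator')

loss = tf.reduce_mean(tf.squared_difference(y_gen, y_pl))  # illustrative L2 loss

# Run the batch-norm update ops together with the optimizer step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)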