Example #1
    def dense_block(self,
                    input_x,
                    nb_layers,
                    layer_name,
                    keep_prob=None,
                    downsample=False):
        """
        Creates a dense block that densely connects all layers of the same filter size
        :param input_x: The input to this dense block (output of prior downsample operation)
        :param nb_layers: how many layers desired
        :param layer_name: base name of this block
        :param keep_prob: the dropout keep probability (None disables dropout)
        :param downsample: whether to include a downsampling transition layer at the end
        :return: the concatenated output of the block (downsampled if requested)
        """

        with tf.name_scope(layer_name):

            # Running list of every layer output in this block
            layers_concat = []

            # Seed the list with the block input
            layers_concat.append(input_x)

            # The first layer of this block
            conv = self.bottleneck_layer(input_x, (layer_name + '_denseN_0'),
                                         keep_prob)

            # Append the first layer's output
            layers_concat.append(conv)

            # Add nb_layers more bottleneck layers after the first
            for z in range(nb_layers):

                # Concatenate all prior layers to form this layer's input
                conv = tf.concat(layers_concat, axis=-1)

                # Create a new layer
                conv = self.bottleneck_layer(
                    conv, (layer_name + '_denseN_' + str(z + 1)), keep_prob)

                # Append this layer to the running list of densely connected layers
                layers_concat.append(conv)

            # Combine the layers
            conv = tf.concat(layers_concat, axis=-1)

            # Downsample if requested
            if downsample:
                conv = self.transition_layer(conv,
                                             (layer_name + '_Downsample'),
                                             keep_prob)

            return conv
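
A hedged usage sketch, not from the source: assuming the class above is instantiated as `net` and `images` is an NHWC feature tensor, stacking two blocks might look like this (names and shapes are illustrative only).

    # Hypothetical usage: `net` is an instance of the class defining dense_block,
    # `images` is an NHWC tensor, e.g. [batch, 64, 64, 32] (illustrative)
    x = net.dense_block(images, nb_layers=4, layer_name='Dense1',
                        keep_prob=0.8, downsample=True)  # ends with a transition layer
    x = net.dense_block(x, nb_layers=4, layer_name='Dense2', keep_prob=0.8)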
Example #2
    def dense_block_3d(self, input_x, nb_layers, layer_name, keep_prob=None):
        """
        Creates a dense block
        :param input_x: The input to this dense block (output of prior downsample operation)
        :param nb_layers: how many layers desired
        :param layer_name: base name of this block
        :param keep_prob: the dropout keep probability (None disables dropout)
        :return: the final 3D layer output and a squeezed 2D projection of the dense concatenation
        """

        with tf.name_scope(layer_name):

            # Running list of every layer output in this block
            layers_concat = []

            # Seed the list with the block input
            layers_concat.append(input_x)

            # The first layer of this block
            conv = self.bottleneck_layer_3d(input_x,
                                            (layer_name + '_denseN_0'),
                                            keep_prob)

            # Append the first layer's output
            layers_concat.append(conv)

            # Add nb_layers more bottleneck layers after the first
            for z in range(nb_layers):

                # Concatenate all prior layers to form this layer's input
                conv = tf.concat(layers_concat, axis=-1)

                # Create a new layer
                conv = self.bottleneck_layer_3d(
                    conv, (layer_name + '_denseN_' + str(z + 1)), keep_prob)

                # Append this layer to the running list of densely connected layers
                layers_concat.append(conv)

            # The projection has to be 2D: first retrieve the Z (slice) and channel sizes
            concat = tf.concat(layers_concat, -1)
            Fz = concat.get_shape().as_list()[1]
            Ch = concat.get_shape().as_list()[-1]

            # Project across the Z dimension with an [Fz, 1, 1] VALID convolution
            concat = self.convolution_3d(layer_name + '_projection',
                                         concat, [Fz, 1, 1],
                                         Ch,
                                         1,
                                         'VALID',
                                         self.phase_train,
                                         BN=False)

            return conv, tf.squeeze(concat)
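
A hedged usage sketch: the 3D variant returns a tuple, the last bottleneck layer's 3D output plus a 2D projection of the full dense concatenation. The instance and tensor names below are assumptions, not from the source.

    # Hypothetical usage: `volume` is an NDHWC tensor, e.g. [batch, 8, 32, 32, 16]
    conv3d, proj2d = net.dense_block_3d(volume, nb_layers=3,
                                        layer_name='Dense3D_1', keep_prob=0.8)
    # The [Fz, 1, 1] VALID convolution reduces Z to 1, so after tf.squeeze
    # proj2d is a 2D feature map of shape [batch, 32, 32, Ch]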
Example #3
    def up_transition_3d(self,
                         scope,
                         X,
                         F,
                         K,
                         S,
                         concat_var=None,
                         padding='VALID'):
        """
        Deconvolutions for the DenseUnet
        :param scope:
        :param X:
        :param F:
        :param K:
        :param S:
        :param padding:
        :param concat_var:
        :param summary:
        :return:
        """

        with tf.variable_scope(scope) as scope:

            # Set channel size based on input depth
            C = X.get_shape().as_list()[-1]

            # He init
            kernel = tf.get_variable(
                'Weights',
                shape=[F, F, F, K, C],
                initializer=tf.contrib.layers.variance_scaling_initializer())

            # Define the biases
            bias = tf.get_variable('Bias',
                                   shape=[K],
                                   initializer=tf.constant_initializer(0.0))

            # Add to the weights collection
            tf.add_to_collection('weights', kernel)
            tf.add_to_collection('biases', bias)

            # Define the output shape: double each spatial dim, set channels to K
            out_shape = X.get_shape().as_list()
            out_shape[1] *= 2
            out_shape[2] *= 2
            out_shape[3] *= 2
            out_shape[-1] = K

            # Perform the 3D deconvolution. output_shape: A 1-D Tensor representing the output shape of the deconvolution op.
            conv = tf.nn.conv3d_transpose(X,
                                          kernel,
                                          output_shape=out_shape,
                                          strides=[1, S, S, S, 1],
                                          padding=padding)

            # Add in bias
            conv = tf.nn.bias_add(conv, bias)

            # Concatenate with the skip connection
            conv = tf.concat([concat_var, conv], axis=-1)

            # Create a histogram summary and summary of sparsity
            if self.summary: self._activation_summary(conv)

            return conv
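
A hedged usage sketch, assuming `decoder_in` and `encoder_skip` are NDHWC tensors from the decoder and the matching encoder stage; with F=2 and S=2 under VALID padding the output doubles each spatial dimension, matching the hard-coded output shape above.

    # Hypothetical usage: upsample by 2x, then concatenate the encoder skip
    up = net.up_transition_3d('Up3D_1', X=decoder_in, F=2, K=64, S=2,
                              concat_var=encoder_skip)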
Example #4
    def up_transition(self,
                      scope,
                      X,
                      F,
                      K,
                      S,
                      concat_var=None,
                      padding='SAME',
                      res=True):
        """
        Performs an upsampling procedure
        :param scope:
        :param X: Inputs
        :param F: the filter (kernel) spatial size
        :param K: the number of output channels
        :param S: the stride size
        :param concat_var: The skip connection
        :param padding: SAME or VALID. In general, use VALID for 3D skip connections
        :param res: Whether to concatenate or add the skip connection
        :return: the activated output after fusing with the skip connection
        """

        with tf.variable_scope(scope) as scope:

            # Set channel size based on input depth
            C = X.get_shape().as_list()[-1]

            # He init
            kernel = tf.get_variable(
                'Weights',
                shape=[F, F, K, C],
                initializer=tf.contrib.layers.variance_scaling_initializer())

            # Define the biases
            bias = tf.get_variable('Bias',
                                   shape=[K],
                                   initializer=tf.constant_initializer(0.0))

            # Add to the weights collection
            tf.add_to_collection('weights', kernel)
            tf.add_to_collection('biases', bias)

            # Define the output shape based on shape of skip connection
            out_shape = concat_var.get_shape().as_list()
            out_shape[-1] = K

            # Perform the deconvolution. output_shape: A 1-D Tensor representing the output shape of the deconvolution op.
            conv = tf.nn.conv2d_transpose(X,
                                          kernel,
                                          output_shape=out_shape,
                                          strides=[1, S, S, 1],
                                          padding=padding)

            # Fuse with the skip connection: residual add or channel concatenation
            if res: conv = tf.add(conv, concat_var)
            else: conv = tf.concat([concat_var, conv], axis=-1)

            # Apply batch normalization. Updates moving statistics during the training phase only
            conv = self.batch_normalization(conv, self.phase_train, scope)

            # Add in bias
            conv = tf.nn.bias_add(conv, bias)

            # Relu
            conv = tf.nn.relu(conv, name=scope.name)

            # Create a histogram summary and summary of sparsity
            if self.summary: self._activation_summary(conv)

            return conv
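
A hedged usage sketch: since the residual path (res=True) fuses with tf.add, K must equal the skip connection's channel count; with res=False the channels are concatenated instead. Tensor names are illustrative, not from the source.

    # Hypothetical usage: residual fusion requires K == encoder_skip channel count
    up = net.up_transition('Up_1', X=decoder_in, F=2, K=64, S=2,
                           concat_var=encoder_skip, res=True)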