def FC_DenseNet103(image_shape, n_filters_first_conv=48, n_pool=4, 
                   growth_rate=12, n_layers_per_block=5, dropout_p=0.2,
                   loss_type="bce", optimizer="sgd", lr=0.001):
    """Create FC-DenseNet103 (Tiramisu network) proposed in `The One Hundred 
       Layers Tiramisu: Fully Convolutional DenseNets for Semantic Segmentation <https://arxiv.org/pdf/1611.09326.pdf>`_ . 
       Code copied from `FC-DenseNet103 <https://github.com/SimJeg/FC-DenseNet/blob/master/FC-DenseNet.py>`_
       and just adapted from Lasagne to Keras.

       The network consists of a downsampling path, where dense blocks and
       transition down modules are applied, followed by an upsampling path,
       where transition up modules and dense blocks are applied. Skip
       connections link the downsampling path to the upsampling path. Each
       layer is a composite function of BN - ReLU - Conv; the original network
       ends with a softmax layer, replaced here by a sigmoid (see below).
        
       Parameters
       ----------
       image_shape : array of 3 ints
           Dimensions of the input image.

       n_filters_first_conv : int, optional
           Number of filters for the first convolution applied.

       n_pool : int, optional
           Number of pooling layers = number of transition down = number of 
           transition up.

       growth_rate : int, optional
           Number of new feature maps created by each layer in a dense block.
       
       n_layers_per_block : int or list of int, optional
           Number of layers per block. Can be an int or a list of size 
           ``(2*n_pool)+1``.

       dropout_p : float, optional
           Dropout rate applied after each convolution (``0.0`` to disable).

       loss_type : str, optional
           Loss type to use, three types available: ``bce`` (Binary Cross Entropy),
           ``w_bce`` (Weighted BCE, based on weight maps) and ``w_bce_dice``
           (Weighted loss: ``weight1*BCE + weight2*Dice``).
                                                                           
       optimizer : str, optional
           Optimizer used to minimize the loss function. Possible options: ``sgd``
           or ``adam``.
                                                                           
       lr : float, optional
           Learning rate value.

       Returns
       -------
       model : Keras model
           Model containing the FC_DenseNet103.
    """
    
    if isinstance(n_layers_per_block, list):
        assert len(n_layers_per_block) == 2 * n_pool + 1
    elif isinstance(n_layers_per_block, int):
        n_layers_per_block = [n_layers_per_block] * (2 * n_pool + 1)
    else:
        raise ValueError("'n_layers_per_block' must be an int or a list of "
                         "size (2*n_pool)+1")

    # Spatial dimensions are left undefined so the model accepts any input size
    dynamic_dim = (None,)*(len(image_shape)-1) + (image_shape[-1],)
    inputs = Input(dynamic_dim)

    if loss_type == "w_bce":
        # Auxiliary input carrying the per-pixel weight maps of the weighted loss
        weights = Input(dynamic_dim)

    #####################
    # First Convolution #
    #####################

    # We perform a first convolution. All the feature maps will be stored in
    # the tensor called stack (the Tiramisu)
    stack = Conv2D(n_filters_first_conv, 3, activation='relu', padding='same',
                   kernel_initializer='he_uniform')(inputs)

    # The number of feature maps in the stack is stored in the variable n_filters
    n_filters = n_filters_first_conv

    #####################
    # Downsampling path #
    #####################

    skip_connection_list = []

    for i in range(n_pool):
        # Dense Block
        for j in range(n_layers_per_block[i]):
            # Compute new feature maps
            l = BN_ReLU_Conv(stack, growth_rate, dropout_p=dropout_p)
            # And stack it: the Tiramisu is growing
            stack = concatenate([stack, l])
            n_filters += growth_rate
        # At the end of the dense block, the current stack is stored in the 
        # skip_connections list
        skip_connection_list.append(stack)

        # Transition Down
        stack = TransitionDown(stack, n_filters, dropout_p)

    skip_connection_list = skip_connection_list[::-1]

    #####################
    #     Bottleneck    #
    #####################

    # Now we store the output of the next dense block in a list. We will only
    # upsample these new feature maps
    block_to_upsample = []

    # Dense Block
    for j in range(n_layers_per_block[n_pool]):
        l = BN_ReLU_Conv(stack, growth_rate, dropout_p=dropout_p)
        block_to_upsample.append(l)
        stack = concatenate([stack, l])

    #######################
    #   Upsampling path   #
    #######################

    for i in range(n_pool):
        # Transition Up (upsampling + concatenation with the skip connection)
        n_filters_keep = growth_rate * n_layers_per_block[n_pool + i]
        stack = TransitionUp(skip_connection_list[i], block_to_upsample, n_filters_keep)

        # Dense Block
        block_to_upsample = []
        for j in range(n_layers_per_block[n_pool + i + 1]):
            l = BN_ReLU_Conv(stack, growth_rate, dropout_p=dropout_p)
            block_to_upsample.append(l)
            stack = concatenate([stack, l])

    # Changed from the original code: the data used has a single class, so the
    # softmax head is replaced by a 1x1 convolution with sigmoid activation
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(stack)

    if loss_type == "w_bce":
        model = Model(inputs=[inputs, weights], outputs=[outputs])
    else:
        model = Model(inputs=[inputs], outputs=[outputs])

    # Select the optimizer
    if optimizer == "sgd":
        opt = tf.keras.optimizers.SGD(
            lr=lr, momentum=0.99, decay=0.0, nesterov=False)
    elif optimizer == "adam":
        opt = tf.keras.optimizers.Adam(
            lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0,
            amsgrad=False)
    else:
        raise ValueError("Error: optimizer value must be 'sgd' or 'adam'")

    # Compile the model
    if loss_type == "bce":
        model.compile(optimizer=opt, loss='binary_crossentropy',
                      metrics=[jaccard_index])
    elif loss_type == "w_bce":
        model.compile(optimizer=opt, loss=binary_crossentropy_weighted(weights),
                      metrics=[jaccard_index])
    elif loss_type == "w_bce_dice":
        model.compile(optimizer=opt,
                      loss=weighted_bce_dice_loss(w_dice=0.66, w_bce=0.33),
                      metrics=[jaccard_index])
    else:
        raise ValueError("'loss_type' must be 'bce', 'w_bce' or 'w_bce_dice'")

    return model
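

# The Tiramisu above relies on three helper blocks that are not defined in
# this snippet. What follows is a minimal sketch of them, adapted from the
# original Lasagne FC-DenseNet implementation; the versions actually used in
# this repository may differ in detail. (Assumes the same
# tensorflow.keras.layers imports used by the rest of this module.)
def BN_ReLU_Conv(inputs, n_filters, filter_size=3, dropout_p=0.2):
    """BN - ReLU - Conv (- Dropout): the composite layer of a dense block."""
    l = BatchNormalization()(inputs)
    l = Activation('relu')(l)
    l = Conv2D(n_filters, filter_size, padding='same',
               kernel_initializer='he_uniform')(l)
    if dropout_p != 0.0:
        l = Dropout(dropout_p)(l)
    return l


def TransitionDown(inputs, n_filters, dropout_p=0.2):
    """1x1 BN-ReLU-Conv followed by 2x2 max pooling (halves the resolution)."""
    l = BN_ReLU_Conv(inputs, n_filters, filter_size=1, dropout_p=dropout_p)
    return MaxPooling2D((2, 2))(l)


def TransitionUp(skip_connection, block_to_upsample, n_filters_keep):
    """Upsample the feature maps of the last dense block with a transposed
       convolution and concatenate the result with the skip connection."""
    l = concatenate(block_to_upsample)
    l = Conv2DTranspose(n_filters_keep, 3, strides=(2, 2), padding='same',
                        kernel_initializer='he_uniform')(l)
    return concatenate([l, skip_connection])
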
def ResUNet_2D(image_shape, activation='elu', k_init='he_normal',
               drop_values=[0.1,0.1,0.1,0.1,0.1], batch_norm=False, 
               feature_maps=[16,32,64,128,256], depth=4, loss_type="bce", 
               optimizer="sgd", lr=0.001, n_classes=1):

    """Create 2D Residual_U-Net.

       Parameters
       ----------
       image_shape : array of 3 ints
           Dimensions of the input image.

       activation : str, optional
           Keras available activation type.

       k_init : str, optional
           Keras available kernel initializer type.

       drop_values : array of floats, optional
           Dropout value to apply at each level. Must have length ``depth+1``.

       batch_norm : bool, optional
           Use batch normalization.

       feature_maps : array of ints, optional
           Feature maps to use on each level. Must have length ``depth+1``.
       
       depth : int, optional
           Depth of the network.                        
                                                                           
       loss_type : str, optional
           Loss type to use, three types available: ``bce`` (Binary Cross Entropy),
           ``w_bce`` (Weighted BCE, based on weight maps) and ``w_bce_dice``
           (Weighted loss: ``weight1*BCE + weight2*Dice``).
                                                                           
       optimizer : str, optional
           Optimizer used to minimize the loss function. Possible options:
           ``sgd`` or ``adam``.
                                                                           
       lr : float, optional
           Learning rate value.

       n_classes : int, optional
           Number of classes.

       Returns
       -------
       model : Keras model
           Model containing the U-Net.


       Calling this function with its default parameters returns the following
       network:

       .. image:: img/resunet.png
           :width: 100%
           :align: center

       Where each green layer represents a residual block as the following:
        
       .. image:: img/res_block.png
           :width: 45%
           :align: center

       Images created with `PlotNeuralNet <https://github.com/HarisIqbal88/PlotNeuralNet>`_.
    """

    if len(feature_maps) != depth+1:
        raise ValueError("'feature_maps' must have length depth+1")
    if len(drop_values) != depth+1:
        raise ValueError("'drop_values' must have length depth+1")

    fm = feature_maps[::-1]

    dynamic_dim = (None,)*(len(image_shape)-1) + (image_shape[-1],)
    inputs = Input(dynamic_dim)

    if loss_type == "w_bce":
        # Auxiliary input carrying the per-pixel weight maps of the weighted loss
        weights = Input(dynamic_dim)

    x = level_block(inputs, depth, fm, 3, activation, k_init, drop_values, 
                    batch_norm, True)

    outputs = Conv2D(n_classes, (1, 1), activation='sigmoid')(x)

    if loss_type == "w_bce":
        model = Model(inputs=[inputs, weights], outputs=[outputs])
    else:
        model = Model(inputs=[inputs], outputs=[outputs])

    # Select the optimizer
    if optimizer == "sgd":
        opt = tf.keras.optimizers.SGD(
            lr=lr, momentum=0.99, decay=0.0, nesterov=False)
    elif optimizer == "adam":
        opt = tf.keras.optimizers.Adam(
            lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0,
            amsgrad=False)
    else:
        raise ValueError("Error: optimizer value must be 'sgd' or 'adam'")

    # Compile the model
    if loss_type == "bce":
        model.compile(optimizer=opt, loss='binary_crossentropy',
                      metrics=[jaccard_index])
    elif loss_type == "w_bce":
        model.compile(optimizer=opt, loss=binary_crossentropy_weighted(weights),
                      metrics=[jaccard_index])
    elif loss_type == "w_bce_dice":
        model.compile(optimizer=opt,
                      loss=weighted_bce_dice_loss(w_dice=0.66, w_bce=0.33),
                      metrics=[jaccard_index])
    else:
        raise ValueError("'loss_type' must be 'bce', 'w_bce' or 'w_bce_dice'")

    return model
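

# Hedged usage sketch: the 256x256 single-channel shape is hypothetical, and
# the level_block helper called by ResUNet_2D is assumed to be defined
# elsewhere in this module.
def _demo_resunet_2d():
    model = ResUNet_2D(image_shape=(256, 256, 1), optimizer="adam", lr=1e-4)
    model.summary()
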
def SE_U_Net_3D(image_shape,
                activation='elu',
                feature_maps=[32, 64, 128, 256],
                depth=3,
                drop_values=[0.1, 0.1, 0.1, 0.1],
                spatial_dropout=False,
                batch_norm=False,
                k_init='he_normal',
                loss_type="bce",
                optimizer="sgd",
                lr=0.001,
                n_classes=1):
    """Create 3D U-Net with squeeze-excite blocks.

       Reference `Squeeze and Excitation Networks <https://arxiv.org/abs/1709.01507>`_.

       Parameters
       ----------
       image_shape : 3D tuple
           Dimensions of the input image.

       activation : str, optional
           Keras available activation type.

       feature_maps : array of ints, optional
           Feature maps to use on each level. Must have length ``depth+1``.
   
       depth : int, optional
           Depth of the network.

       drop_values : array of floats, optional
           Dropout value to apply at each level. Must have length ``depth+1``.

       spatial_dropout : bool, optional
           Use spatial dropout instead of the `normal` dropout.

       batch_norm : bool, optional
           Use batch normalization.
    
       k_init : string, optional
           Kernel initialization for convolutional layers.

       loss_type : str, optional
           Loss type to use, three types available: ``bce`` (Binary Cross Entropy),
           ``w_bce`` (Weighted BCE, based on weight maps) and ``w_bce_dice``
           (Weighted loss: ``weight1*BCE + weight2*Dice``).

       optimizer : str, optional
           Optimizer used to minimize the loss function. Possible options: ``sgd``
           or ``adam``.

       lr : float, optional
           Learning rate value.

       n_classes : int, optional
           Number of classes.

       Returns
       -------
       model : Keras model
           Model containing the U-Net.

    
       Calling this function with its default parameters returns the following
       network:

       .. image:: img/unet_3d.png
           :width: 100%
           :align: center

       Image created with `PlotNeuralNet <https://github.com/HarisIqbal88/PlotNeuralNet>`_.
    """

    if len(feature_maps) != depth + 1:
        raise ValueError("'feature_maps' must have length depth+1")
    if len(drop_values) != depth + 1:
        raise ValueError("'drop_values' must have length depth+1")

    x = Input(image_shape)
    inputs = x

    if loss_type == "w_bce":
        weights = Input(image_shape)

    # List used to access layers easily to make the skip connections of the U-Net
    l = []

    # ENCODER
    for i in range(depth):
        x = Conv3D(feature_maps[i], (3, 3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)
        x = squeeze_excite_block(x)
        if spatial_dropout and drop_values[i] > 0:
            x = SpatialDropout3D(drop_values[i])(x)
        elif drop_values[i] > 0 and not spatial_dropout:
            x = Dropout(drop_values[i])(x)

        x = Conv3D(feature_maps[i], (3, 3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)
        x = squeeze_excite_block(x)

        l.append(x)

        x = MaxPooling3D((2, 2, 2))(x)

    # BOTTLENECK
    x = Conv3D(feature_maps[depth], (3, 3, 3),
               activation=None,
               kernel_initializer=k_init,
               padding='same')(x)
    x = BatchNormalization()(x) if batch_norm else x
    x = Activation(activation)(x)
    if spatial_dropout and drop_values[depth] > 0:
        x = SpatialDropout3D(drop_values[depth])(x)
    elif drop_values[depth] > 0 and not spatial_dropout:
        x = Dropout(drop_values[depth])(x)

    x = Conv3D(feature_maps[depth], (3, 3, 3),
               activation=None,
               kernel_initializer=k_init,
               padding='same')(x)
    x = BatchNormalization()(x) if batch_norm else x
    x = Activation(activation)(x)

    # DECODER
    for i in range(depth - 1, -1, -1):
        x = Conv3DTranspose(feature_maps[i], (2, 2, 2),
                            strides=(2, 2, 2),
                            padding='same')(x)
        x = concatenate([x, l[i]])
        x = Conv3D(feature_maps[i], (3, 3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)
        x = squeeze_excite_block(x)
        if spatial_dropout and drop_values[i] > 0:
            x = SpatialDropout3D(drop_values[i])(x)
        elif drop_values[i] > 0 and not spatial_dropout:
            x = Dropout(drop_values[i])(x)
        x = Conv3D(feature_maps[i], (3, 3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)
        x = squeeze_excite_block(x)

    outputs = Conv3D(n_classes, (1, 1, 1), activation='sigmoid')(x)

    # Build the model; the weighted loss takes the weight maps as a second input
    if loss_type == "w_bce":
        model = Model(inputs=[inputs, weights], outputs=[outputs])
    else:
        model = Model(inputs=[inputs], outputs=[outputs])

    # Select the optimizer
    if optimizer == "sgd":
        opt = tf.keras.optimizers.SGD(lr=lr,
                                      momentum=0.99,
                                      decay=0.0,
                                      nesterov=False)
    elif optimizer == "adam":
        opt = tf.keras.optimizers.Adam(lr=lr,
                                       beta_1=0.9,
                                       beta_2=0.999,
                                       epsilon=None,
                                       decay=0.0,
                                       amsgrad=False)
    else:
        raise ValueError("Error: optimizer value must be 'sgd' or 'adam'")

    # Compile the model
    if loss_type == "bce":
        if n_classes > 1:
            model.compile(optimizer=opt,
                          loss='categorical_crossentropy',
                          metrics=[jaccard_index_softmax])
        else:
            model.compile(optimizer=opt,
                          loss='binary_crossentropy',
                          metrics=[jaccard_index])
    elif loss_type == "w_bce":
        model.compile(optimizer=opt,
                      loss=binary_crossentropy_weighted(weights),
                      metrics=[jaccard_index])
    elif loss_type == "w_bce_dice":
        model.compile(optimizer=opt,
                      loss=weighted_bce_dice_loss(w_dice=0.66, w_bce=0.33),
                      metrics=[jaccard_index])
    else:
        raise ValueError("'loss_type' must be 'bce', 'w_bce' or 'w_bce_dice'")

    return model
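

# Minimal sketch of the squeeze_excite_block helper assumed by SE_U_Net_3D,
# following Hu et al., "Squeeze-and-Excitation Networks". The reduction ratio
# and exact layering are assumptions; the repository's version may differ.
# (Assumes GlobalAveragePooling3D, Dense, Reshape and Multiply are imported
# from tensorflow.keras.layers like the other layers used in this module.)
def squeeze_excite_block(x, ratio=16):
    filters = x.shape[-1]
    se = GlobalAveragePooling3D()(x)                      # squeeze: global channel descriptor
    se = Dense(filters // ratio, activation='relu')(se)   # bottleneck excitation MLP
    se = Dense(filters, activation='sigmoid')(se)         # per-channel weights in [0, 1]
    se = Reshape((1, 1, 1, filters))(se)                  # broadcast over the spatial dims
    return Multiply()([x, se])                            # recalibrate the feature maps
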
def Attention_U_Net_2D(image_shape,
                       activation='elu',
                       feature_maps=[16, 32, 64, 128, 256],
                       depth=4,
                       drop_values=[0.1, 0.1, 0.2, 0.2, 0.3],
                       spatial_dropout=False,
                       batch_norm=False,
                       k_init='he_normal',
                       loss_type="bce",
                       optimizer="sgd",
                       lr=0.002,
                       n_classes=1):
    """Create 2D U-Net with Attention blocks. 

       Based on `Attention U-Net: Learning Where to Look for the Pancreas <https://arxiv.org/abs/1804.03999>`_.
                                                                                
       Parameters
       ----------
       image_shape : 2D tuple
           Dimensions of the input image.              
                                                                                
       activation : str, optional
           Keras available activation type.        
                                                                           
       feature_maps : array of ints, optional
           Feature maps to use on each level. Must have length ``depth+1``.
                                                                           
       depth : int, optional
           Depth of the network.                        
                                                                           
       drop_values : array of floats, optional
           Dropout value to apply at each level. Must have length ``depth+1``.
           If no value is provided the default behaviour is a pyramidal
           schedule starting at ``0.1`` and reaching ``0.3``.
                                                                           
       spatial_dropout : bool, optional
           Use spatial dropout instead of the `normal` dropout.                                               
                                                                           
       batch_norm : bool, optional
           Use batch normalization.
                                                                           
       k_init : string, optional
           Kernel initialization for convolutional layers.                                                         
                                                                           
       loss_type : str, optional
           Loss type to use, three types available: ``bce`` (Binary Cross Entropy),
           ``w_bce`` (Weighted BCE, based on weight maps) and ``w_bce_dice``
           (Weighted loss: ``weight1*BCE + weight2*Dice``).
                                                                           
       optimizer : str, optional
           Optimizer used to minimize the loss function. Possible options:
           ``sgd`` or ``adam``.
                                                                           
       lr : float, optional
           Learning rate value.                          
        
       n_classes : int, optional
           Number of classes.
                                                                           
       Returns
       -------                                                                 
       model : Keras model
           Model containing the U-Net.              


       Example
       -------

       Calling this function with its default parameters returns the following  
       network:                                                                 
                                                                                
       .. image:: img/unet.png                                                  
           :width: 100%                                                         
           :align: center                                                       
                                                                                
       Image created with `PlotNeuralNet <https://github.com/HarisIqbal88/PlotNeuralNet>`_.

       This network incorporates Attention Gates (AG) in its skip connections,
       which can be seen as follows:

       .. image:: img/attention_gate.png
           :width: 100%
           :align: center
       
       Image extracted from `Attention U-Net: Learning Where to Look for the Pancreas <https://arxiv.org/abs/1804.03999>`_.
    """

    if len(feature_maps) != depth + 1:
        raise ValueError("'feature_maps' must have length depth+1")
    if len(drop_values) != depth + 1:
        raise ValueError("'drop_values' must have length depth+1")

    dynamic_dim = (None, ) * (len(image_shape) - 1) + (image_shape[-1], )
    x = Input(dynamic_dim)
    inputs = x

    if loss_type == "w_bce":
        weights = Input(image_shape)

    # List used to access layers easily to make the skip connections of the U-Net
    l = []

    # ENCODER
    for i in range(depth):
        x = Conv2D(feature_maps[i], (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)
        if drop_values is not None:
            if spatial_dropout:
                x = SpatialDropout2D(drop_values[i])(x)
            else:
                x = Dropout(drop_values[i])(x)
        x = Conv2D(feature_maps[i], (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)

        l.append(x)

        x = MaxPooling2D((2, 2))(x)

    # BOTTLENECK
    x = Conv2D(feature_maps[depth], (3, 3),
               activation=None,
               kernel_initializer=k_init,
               padding='same')(x)
    x = BatchNormalization()(x) if batch_norm else x
    x = Activation(activation)(x)
    if drop_values is not None:
        if spatial_dropout:
            x = SpatialDropout2D(drop_values[depth])(x)
        else:
            x = Dropout(drop_values[depth])(x)
    x = Conv2D(feature_maps[depth], (3, 3),
               activation=None,
               kernel_initializer=k_init,
               padding='same')(x)
    x = BatchNormalization()(x) if batch_norm else x
    x = Activation(activation)(x)

    # DECODER
    for i in range(depth - 1, -1, -1):
        x = Conv2DTranspose(feature_maps[i], (2, 2),
                            strides=(2, 2),
                            padding='same')(x)
        attn = AttentionBlock(x, l[i], feature_maps[i], batch_norm)
        x = concatenate([x, attn])
        x = Conv2D(feature_maps[i], (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)
        if drop_values is not None:
            if spatial_dropout:
                x = SpatialDropout2D(drop_values[i])(x)
            else:
                x = Dropout(drop_values[i])(x)

        x = Conv2D(feature_maps[i], (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)

    outputs = Conv2D(n_classes, (1, 1), activation='sigmoid')(x)

    # Build the model; the weighted loss takes the weight maps as a second input
    if loss_type == "w_bce":
        model = Model(inputs=[inputs, weights], outputs=[outputs])
    else:
        model = Model(inputs=[inputs], outputs=[outputs])

    # Select the optimizer
    if optimizer == "sgd":
        opt = tf.keras.optimizers.SGD(lr=lr,
                                      momentum=0.99,
                                      decay=0.0,
                                      nesterov=False)
    elif optimizer == "adam":
        opt = tf.keras.optimizers.Adam(lr=lr,
                                       beta_1=0.9,
                                       beta_2=0.999,
                                       epsilon=None,
                                       decay=0.0,
                                       amsgrad=False)
    else:
        raise ValueError("Error: optimizer value must be 'sgd' or 'adam'")

    # Compile the model
    if loss_type == "bce":
        if n_classes > 1:
            model.compile(optimizer=opt,
                          loss='categorical_crossentropy',
                          metrics=[jaccard_index_softmax])
        else:
            model.compile(optimizer=opt,
                          loss='binary_crossentropy',
                          metrics=[jaccard_index])
    elif loss_type == "w_bce":
        model.compile(optimizer=opt,
                      loss=binary_crossentropy_weighted(weights),
                      metrics=[jaccard_index])
    elif loss_type == "w_bce_dice":
        model.compile(optimizer=opt,
                      loss=weighted_bce_dice_loss(w_dice=0.66, w_bce=0.33),
                      metrics=[jaccard_index])
    else:
        raise ValueError("'loss_type' must be 'bce', 'w_bce' or 'w_bce_dice'")

    return model
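

# Minimal sketch of the AttentionBlock helper assumed by Attention_U_Net_2D,
# following the additive attention gate of `Attention U-Net
# <https://arxiv.org/abs/1804.03999>`_; the exact helper in this repository
# may differ. `x` is the gating signal coming from the decoder and `shortcut`
# the encoder skip connection to be re-weighted. (Assumes Add and Multiply are
# imported from tensorflow.keras.layers like the other layers in this module.)
def AttentionBlock(x, shortcut, filters, batch_norm):
    g = Conv2D(filters, (1, 1))(x)                  # project the gating signal
    g = BatchNormalization()(g) if batch_norm else g
    s = Conv2D(filters, (1, 1))(shortcut)           # project the skip connection
    s = BatchNormalization()(s) if batch_norm else s
    psi = Activation('relu')(Add()([g, s]))         # additive attention
    psi = Conv2D(1, (1, 1))(psi)                    # collapse to one coefficient map
    psi = BatchNormalization()(psi) if batch_norm else psi
    psi = Activation('sigmoid')(psi)                # attention coefficients in [0, 1]
    return Multiply()([shortcut, psi])              # re-weighted skip connection


# Hedged usage sketch (hypothetical shape). Note that with loss_type="w_bce"
# the model takes the weight maps as a second input, so data must be fed as
# [images, weight_maps].
def _demo_attention_u_net_2d():
    model = Attention_U_Net_2D(image_shape=(256, 256, 1), optimizer="adam")
    model.summary()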