Example #1
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, AveragePooling2D and a squeeze-excite block
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                    in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu, 1x1 conv, avgpool and squeeze-excite
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    return x
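Every example on this page calls squeeze_excite_block without showing it. The sketch below follows the widely used open-source Keras implementation of the squeeze-and-excitation block (Hu et al., 2017); the ratio default and layer choices are assumptions carried over from that implementation, not taken from these examples.

from keras import backend as K
from keras.layers import GlobalAveragePooling2D, Reshape, Dense, Permute, multiply

def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block (a sketch, not the canonical source).
    Args:
        input: input keras tensor
        ratio: reduction ratio of the excitation bottleneck
    Returns: a keras tensor with each channel rescaled by a learned gate
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    filters = K.int_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    # Squeeze: average each feature map down to a single scalar per channel
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    # Excite: a bottleneck MLP ending in a sigmoid gate per channel
    se = Dense(filters // ratio, activation='relu',
               kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid',
               kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    # Scale: reweight the input channels by the learned gates
    return multiply([init, se])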
Example #2
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: if True, add a 1x1 bottleneck convolution before each 3x3 convolution
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to allow the number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    # squeeze and excite block
    x = squeeze_excite_block(x)

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter
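__conv_block is called above but not shown. A minimal sketch matching the standard DenseNet composite function (BN-ReLU-Conv, with an optional DenseNet-B bottleneck), assuming the same imports as above plus Dropout; the layer order and the 4x bottleneck width are assumptions from the common reference implementation.

def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU and a 3x3 Conv2D, with optional bottleneck and dropout (a sketch). '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # DenseNet-B widens to 4 * growth_rate before the 3x3 conv
        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same',
                   use_bias=False, kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same',
               use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x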
Example #3
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of
            middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters

    x = Conv2D(filters1, (1, 1), kernel_initializer='he_normal')(input_tensor)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2,
               kernel_size,
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)

    x = squeeze_excite_block(x)

    x = add([x, input_tensor])
    x = Activation('relu')(x)
    return x
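A minimal usage sketch for identity_block, assuming the usual Keras imports (the shapes are hypothetical; the last entry of filters must equal the channel count of input_tensor so the residual add is valid):

inp = Input(shape=(56, 56, 256))
out = identity_block(inp, kernel_size=3, filters=[64, 64, 256], stage=2, block='b')
# out keeps the input shape: (None, 56, 56, 256)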
Example #4
def f(input):
    x = Conv2D(filters=filters, kernel_size=3,
               strides=1, padding="same",
               kernel_initializer="he_normal",
               kernel_regularizer=l2(1e-4))(input)
    x = BatchNormalization(axis=3, freeze=False)(x)
    x = LeakyReLU()(x)

    x = Conv2D(filters=filters, kernel_size=3,
               strides=(1, 1), padding="same",
               kernel_initializer="he_normal",
               kernel_regularizer=l2(1e-4))(x)
    x = BatchNormalization(axis=3, freeze=False)(x)
    x = squeeze_excite_block(x)
    x_res = input
    x = Add()([x, x_res])

    x = LeakyReLU()(x)

    return x
Example #5
def _resnet_block(input, filters, k=1, strides=(1, 1)):
    ''' Adds a pre-activation resnet block without bottleneck layers

    Args:
        input: input tensor
        filters: number of output filters
        k: width factor
        strides: strides of the convolution layer

    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)

    if strides != (1, 1) or init._keras_shape[channel_axis] != filters * k:
        init = Conv2D(filters * k, (1, 1), padding='same', kernel_initializer='he_normal',
                      use_bias=False, strides=strides)(x)

    x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
               use_bias=False, strides=strides)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters * k, (3, 3), padding='same', kernel_initializer='he_normal',
               use_bias=False)(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    m = add([x, init])
    return m
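A usage sketch for _resnet_block (hypothetical shapes; the _keras_shape attribute used above assumes an older Keras, and the projection shortcut fires here because the stride is 2):

inp = Input(shape=(32, 32, 16))
out = _resnet_block(inp, filters=16, k=2, strides=(2, 2))
# out has shape (None, 16, 16, 32): the 1x1 projection matches the widened, downsampled branch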
Example #6
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2)):
    """A block that has a conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of
            middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: Strides for the first conv layer in the block.
    # Returns
        Output tensor for the block.
    Note that from stage 3,
    the first conv layer at main path is with strides=(2, 2),
    and the shortcut should have strides=(2, 2) as well.
    """
    filters1, filters2, filters3 = filters

    x = Conv2D(filters1, (1, 1),
               strides=strides,
               kernel_initializer='he_normal')(input_tensor)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2,
               kernel_size,
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)

    shortcut = Conv2D(filters3, (1, 1),
                      strides=strides,
                      kernel_initializer='he_normal')(input_tensor)
    shortcut = BatchNormalization(axis=3)(shortcut)

    x = squeeze_excite_block(x)
    x = add([x, shortcut])
    x = Activation('relu')(x)
    return x
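conv_block and identity_block compose into a full SE-ResNet stage in the usual way: one conv_block to change resolution and width, then identity_blocks at that width. A hypothetical helper, se_resnet_stage, sketching the pattern:

def se_resnet_stage(x, filters, blocks, stage, strides=(2, 2)):
    # hypothetical helper, not part of the original source
    x = conv_block(x, 3, filters, stage=stage, block='a', strides=strides)
    for i in range(1, blocks):
        x = identity_block(x, 3, filters, stage=stage, block=chr(ord('a') + i))
    return x

# e.g. the stage-2 layout of an SE-ResNet-50:
# x = se_resnet_stage(x, [64, 64, 256], blocks=3, stage=2, strides=(1, 1))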
Example #7
def __bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay=5e-4):
    ''' Adds a bottleneck block
    Args:
        input: input tensor
        filters: number of output filters
        cardinality: cardinality factor describing the number of
            grouped convolutions
        strides: performs strided convolution for downsampling if > 1
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    init = input

    grouped_channels = int(filters / cardinality)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # Project the shortcut to match the residual branch width (2 * filters)
    init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                  use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
    init = BatchNormalization(axis=channel_axis)(init)

    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = __grouped_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)

    x = Conv2D(filters * 2, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis)(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    x = add([init, x])
    x = LeakyReLU()(x)

    return x
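__grouped_convolution_block is called above but not shown. A sketch of the usual ResNeXt grouped 3x3 convolution, implemented by slicing the channels into cardinality groups; the channel-slicing Lambda and the LeakyReLU follow the style of the block above, and the details should be treated as assumptions.

def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Grouped 3x3 convolution via channel slicing (a sketch). '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if cardinality == 1:
        # with a single group this is just a plain 3x3 convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False,
                   strides=(strides, strides), kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(input)
        x = BatchNormalization(axis=channel_axis)(x)
        return LeakyReLU()(x)

    group_list = []
    for c in range(cardinality):
        # bind c as a default argument so each Lambda keeps its own group index
        x = Lambda(lambda z, c=c: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last'
                   else z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False,
                   strides=(strides, strides), kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(x)
        group_list.append(x)

    x = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(x)
    return LeakyReLU()(x)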
Example #8
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
    """Adds an Inception-ResNet block with a Squeeze-and-Excitation block at the end.
    This function builds 3 types of Inception-ResNet blocks mentioned
    in the paper, controlled by the `block_type` argument (which is the
    block name used in the official TF-slim implementation):
        - Inception-ResNet-A: `block_type='block35'`
        - Inception-ResNet-B: `block_type='block17'`
        - Inception-ResNet-C: `block_type='block8'`
    # Arguments
        x: input tensor.
        scale: scaling factor to scale the residuals (i.e., the output of
            passing `x` through an inception module) before adding them
            to the shortcut branch. Let `r` be the output from the residual branch,
            the output of this block will be `x + scale * r`.
        block_type: `'block35'`, `'block17'` or `'block8'`, determines
            the network structure in the residual branch.
        block_idx: an `int` used for generating layer names. The Inception-ResNet blocks
            are repeated many times in this network. We use `block_idx` to identify
            each of the repetitions. For example, the first Inception-ResNet-A block
            will have `block_type='block35', block_idx=0`, and the layer names will have
            a common prefix `'block35_0'`.
        activation: activation function to use at the end of the block
            (see [activations](../activations.md)).
            When `activation=None`, no activation is applied
            (i.e., "linear" activation: `a(x) = x`).
    # Returns
        Output tensor for the block.
    # Raises
        ValueError: if `block_type` is not one of `'block35'`,
            `'block17'` or `'block8'`.
    """
    if block_type == 'block35':
        branch_0 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(branch_1, 32, 3)
        branch_2 = conv2d_bn(x, 32, 1)
        branch_2 = conv2d_bn(branch_2, 48, 3)
        branch_2 = conv2d_bn(branch_2, 64, 3)
        branches = [branch_0, branch_1, branch_2]
    elif block_type == 'block17':
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 128, 1)
        branch_1 = conv2d_bn(branch_1, 160, [1, 7])
        branch_1 = conv2d_bn(branch_1, 192, [7, 1])
        branches = [branch_0, branch_1]
    elif block_type == 'block8':
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(branch_1, 224, [1, 3])
        branch_1 = conv2d_bn(branch_1, 256, [3, 1])
        branches = [branch_0, branch_1]
    else:
        raise ValueError('Unknown Inception-ResNet block type. '
                         'Expects "block35", "block17" or "block8", '
                         'but got: ' + str(block_type))

    block_name = block_type + '_' + str(block_idx)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    mixed = Concatenate(axis=channel_axis,
                        name=block_name + '_mixed')(branches)
    up = conv2d_bn(mixed,
                   K.int_shape(x)[channel_axis],
                   1,
                   activation=None,
                   use_bias=True,
                   name=block_name + '_conv')

    x = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
               output_shape=K.int_shape(x)[1:],
               arguments={'scale': scale},
               name=block_name)([x, up])
    if activation is not None:
        x = Activation(activation, name=block_name + '_ac')(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)
    return x
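conv2d_bn, used throughout this example, appears to be the helper from the stock Keras Inception-ResNet v2 implementation; a sketch under that assumption:

def conv2d_bn(x, filters, kernel_size, strides=1, padding='same',
              activation='relu', use_bias=False, name=None):
    ''' Conv2D followed by BatchNormalization and an activation (a sketch of the
    helper in keras.applications.inception_resnet_v2). '''
    x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
               use_bias=use_bias, name=name)(x)
    if not use_bias:
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
        bn_name = None if name is None else name + '_bn'
        x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    if activation is not None:
        ac_name = None if name is None else name + '_ac'
        x = Activation(activation, name=ac_name)(x)
    return x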
Example #9
def SEInceptionResNetV2(include_top=True,
                        weights=None,
                        input_tensor=None,
                        input_shape=None,
                        pooling=None,
                        classes=1000):
    """Instantiates the SE-Inception-ResNet v2 architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that when using TensorFlow, for best performance you should
    set `"image_data_format": "channels_last"` in your Keras config
    at `~/.keras/keras.json`.
    The model and the weights are compatible with both TensorFlow and Theano
    backends (but not CNTK). The data format convention used by the model is
    the one specified in your Keras config file.
    Note that the default input image size for this model is 299x299, instead
    of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
    function is different (i.e., do not use `imagenet_utils.preprocess_input()`
    with this model. Use `preprocess_input()` defined in this module instead).
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or `'imagenet'` (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is `False` (otherwise the input shape
            has to be `(299, 299, 3)` (with `'channels_last'` data format)
            or `(3, 299, 299)` (with `'channels_first'` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 139.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the last convolutional layer.
            - `'avg'` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `'max'` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
    # Returns
        A Keras `Model` instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with an unsupported backend.
    """
    if K.backend() in {'cntk'}:
        raise RuntimeError(K.backend() +
                           ' backend is currently unsupported for this model.')

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=139,
                                      data_format=K.image_data_format(),
                                      require_flatten=False,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid')
    x = MaxPooling2D(3, strides=2)(x)

    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')

    if include_top:
        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model
    model = Model(inputs, x, name='se_inception_resnet_v2')
    if weights == 'imagenet':
        temp = InceptionResNetV2(include_top=False,
                                 weights='imagenet',
                                 input_tensor=input_tensor,
                                 input_shape=input_shape,
                                 pooling=pooling)
        temp.save_weights(
            './inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5')
        model.load_weights(
            './inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5',
            by_name=True)

    return model
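A minimal usage sketch (random initialization; note that passing weights='imagenet' above additionally requires a stock InceptionResNetV2 to be importable and writes a temporary .h5 file to the working directory):

model = SEInceptionResNetV2(include_top=True, weights=None, classes=1000)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# with channels_last, model.predict expects input batches of shape (N, 299, 299, 3)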
Example #10
    def get_unet(self):

        # ***********3 inputs***********************
        inputs_modality1 = Input((self.img_rows, self.img_cols, 3),
                                 name='inputs_modality1')
        conv_m1_01 = Conv2D(64,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer='he_normal')(inputs_modality1)
        conv_m1_02 = Conv2D(64,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer='he_normal')(conv_m1_01)

        inputs_modality2 = Input((self.img_rows, self.img_cols, 3),
                                 name='inputs_modality2')
        conv_m2_01 = Conv2D(64,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer='he_normal')(inputs_modality2)
        conv_m2_02 = Conv2D(64,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer='he_normal')(conv_m2_01)

        inputs_modality3 = Input((self.img_rows, self.img_cols, 3),
                                 name='inputs_modality3')
        conv_m3_01 = Conv2D(64,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer='he_normal')(inputs_modality3)
        conv_m3_02 = Conv2D(64,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer='he_normal')(conv_m3_01)
        # ***********3 inputs end***********************

        # ***********Feature blending ***********************#
        FB_00 = Concatenate()([conv_m1_02, conv_m2_02, conv_m3_02])
        pool1 = MaxPooling2D(pool_size=(2, 2))(FB_00)

        conv2 = Conv2D(64, 3, padding='same',
                       kernel_initializer='he_normal')(pool1)
        init = conv2

        conv2 = BatchNormalization(axis=-1, epsilon=1e-3)(conv2)
        conv2 = Activation('relu')(conv2)

        conv2 = Conv2D(64, 3, padding='same',
                       kernel_initializer='he_normal')(conv2)
        # squeeze and excite block
        conv2 = squeeze_excite_block(conv2)
        conv2 = add([init, conv2])
        conv2 = BatchNormalization(axis=-1, epsilon=1e-3)(conv2)
        conv2 = Activation('relu')(conv2)

        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        ###

        conv3 = Conv2D(128, 3, padding='same',
                       kernel_initializer='he_normal')(pool2)
        init = conv3
        conv3 = BatchNormalization(axis=-1, epsilon=1e-3)(conv3)
        conv3 = Activation('relu')(conv3)

        conv3 = Conv2D(128, 3, padding='same',
                       kernel_initializer='he_normal')(conv3)
        # squeeze and excite block
        conv3 = squeeze_excite_block(conv3)
        conv3 = add([init, conv3])
        conv3 = BatchNormalization(axis=-1, epsilon=1e-3)(conv3)
        conv3 = Activation('relu')(conv3)

        #

        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        ###

        conv4 = Conv2D(256, 3, padding='same',
                       kernel_initializer='he_normal')(pool3)
        init = conv4
        conv4 = BatchNormalization(axis=-1, epsilon=1e-3)(conv4)
        conv4 = Activation('relu')(conv4)

        conv4 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv4)
        # squeeze and excite block
        conv4 = squeeze_excite_block(conv4)
        conv4 = add([init, conv4])

        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        ###
        conv5 = Conv2D(512, 3, padding='same',
                       kernel_initializer='he_normal')(pool4)
        init = conv5
        conv5 = BatchNormalization(axis=-1, epsilon=1e-3)(conv5)
        conv5 = Activation('relu')(conv5)

        conv5 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv5)
        # squeeze and excite block
        conv5 = squeeze_excite_block(conv5)
        conv5 = add([init, conv5])
        conv5 = BatchNormalization(axis=-1, epsilon=1e-3)(conv5)
        conv5 = Activation('relu')(conv5)
        #		drop4 = Dropout(0.5)(conv5)

        #		conv5 = Conv2D(512, 1, padding='same',
        #                       kernel_initializer='he_normal')(conv5)
        #		conv5 = BatchNormalization(axis=-1, epsilon=1e-3)(conv5)
        #		conv5 = Activation('relu')(conv5)
        pool5 = MaxPooling2D(pool_size=(2, 2))(conv5)
        ###

        up6 = Conv2D(512, 3, padding='same',
                     kernel_initializer='he_normal')(pool5)

        up6 = BatchNormalization(axis=-1, epsilon=1e-3)(up6)
        up6 = Activation('relu')(up6)

        up6 = Conv2DTranspose(512,
                              3,
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer='he_normal')(up6)
        init = up6
        up6 = BatchNormalization(axis=-1, epsilon=1e-3)(up6)
        up6 = Activation('relu')(up6)
        up6 = Conv2D(512, 1, padding='same',
                     kernel_initializer='he_normal')(up6)
        # squeeze and excite block
        up6 = squeeze_excite_block(up6)
        up6 = add([init, up6])
        up6 = BatchNormalization(axis=-1, epsilon=1e-3)(up6)
        up6 = Activation('relu')(up6)

        merge6 = Concatenate()([conv5, up6])

        conv6 = Conv2D(256, 3, padding='same',
                       kernel_initializer='he_normal')(merge6)

        conv6 = BatchNormalization(axis=-1, epsilon=1e-3)(conv6)
        conv6 = Activation('relu')(conv6)

        up7 = Conv2DTranspose(256,
                              3,
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer='he_normal')(conv6)
        init = up7
        up7 = BatchNormalization(axis=-1, epsilon=1e-3)(up7)
        up7 = Activation('relu')(up7)
        up7 = Conv2D(256, 1, padding='same',
                     kernel_initializer='he_normal')(up7)
        # squeeze and excite block
        up7 = squeeze_excite_block(up7)
        up7 = add([init, up7])
        up7 = BatchNormalization(axis=-1, epsilon=1e-3)(up7)
        up7 = Activation('relu')(up7)

        merge7 = Concatenate()([conv4, up7])

        conv7 = Conv2D(128, 3, padding='same',
                       kernel_initializer='he_normal')(merge7)
        conv7 = BatchNormalization(axis=-1, epsilon=1e-3)(conv7)
        conv7 = Activation('relu')(conv7)

        up8 = Conv2DTranspose(128,
                              3,
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer='he_normal')(conv7)
        init = up8

        up8 = BatchNormalization(axis=-1, epsilon=1e-3)(up8)
        up8 = Activation('relu')(up8)
        up8 = Conv2D(128, 1, padding='same',
                     kernel_initializer='he_normal')(up8)
        # squeeze and excite block
        up8 = squeeze_excite_block(up8)
        up8 = add([init, up8])
        up8 = BatchNormalization(axis=-1, epsilon=1e-3)(up8)
        up8 = Activation('relu')(up8)

        merge8 = Concatenate()([conv3, up8])

        conv8 = Conv2D(64, 3, padding='same',
                       kernel_initializer='he_normal')(merge8)
        conv8 = BatchNormalization(axis=-1, epsilon=1e-3)(conv8)
        conv8 = Activation('relu')(conv8)
        up9 = Conv2DTranspose(64,
                              3,
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer='he_normal')(conv8)
        init = up9
        up9 = BatchNormalization(axis=-1, epsilon=1e-3)(up9)
        up9 = Activation('relu')(up9)
        up9 = Conv2D(64, 1, padding='same',
                     kernel_initializer='he_normal')(up9)
        # squeeze and excite block
        up9 = squeeze_excite_block(up9)
        up9 = add([init, up9])
        up9 = BatchNormalization(axis=-1, epsilon=1e-3)(up9)
        up9 = Activation('relu')(up9)

        merge9 = Concatenate()([conv2, up9])

        conv9 = Conv2D(32, 3, padding='same',
                       kernel_initializer='he_normal')(merge9)
        conv9 = BatchNormalization(axis=-1, epsilon=1e-3)(conv9)
        conv9 = Activation('relu')(conv9)
        up10 = Conv2DTranspose(32,
                               3,
                               strides=(2, 2),
                               padding='same',
                               kernel_initializer='he_normal')(conv9)
        init = up10

        up10 = BatchNormalization(axis=-1, epsilon=1e-3)(up10)
        up10 = Activation('relu')(up10)
        up10 = Conv2D(32, 1, padding='same',
                      kernel_initializer='he_normal')(up10)
        # squeeze and excite block
        up10 = squeeze_excite_block(up10)
        up10 = add([init, up10])
        up10 = BatchNormalization(axis=-1, epsilon=1e-3)(up10)
        up10 = Activation('relu')(up10)

        merge9 = Concatenate()([FB_00, up10])

        # *************************************u-net*****************************

        # *************************************outputs***************************
        conv10_m1 = Conv2D(32,
                           3,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(merge9)
        conv10_m1 = Conv2D(32,
                           3,
                           activation='relu',
                           padding='same',
                           kernel_initializer='he_normal')(conv10_m1)
        conv10_m1 = Conv2D(1, 1, name='conv10_m1')(conv10_m1)

        # *************************************outputs******************************

        model = Model(
            inputs=[inputs_modality1, inputs_modality2, inputs_modality3],
            outputs=[conv10_m1])
        #		model.load_weights(pretrainedModel)
        model.compile(optimizer=Adam(lr=1e-4),
                      loss='mean_absolute_error',
                      metrics=['accuracy'])
        #		model.compile(optimizer = Adam(lr = 1e-4), loss = bce_dice_loss, metrics = ['accuracy'])

        return model
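The commented-out compile call above references bce_dice_loss, which is not shown. A common formulation, offered as an assumption rather than the author's definition:

from keras import backend as K
from keras.losses import binary_crossentropy

def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def bce_dice_loss(y_true, y_pred):
    # hypothetical definition: binary cross-entropy plus a soft-Dice penalty
    return binary_crossentropy(y_true, y_pred) + (1. - dice_coef(y_true, y_pred))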
Example #11
def focusnet():

    input = Input((192, 256, 3))
           
    conv1 = initial_conv_block(input) #512
    pool1 = _residual_block(basic_block, filters=32, repetitions=1, is_first_layer=False)(conv1) #256
    
    conv2 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(pool1) #256
    pool2 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=False)(conv2) #128
    
    conv3 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(pool2) #128
    pool3 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=False)(conv3) #64

    conv4 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(pool3) #64
    drop4 = Dropout(0.2)(conv4)
    
    up5 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop4)) #128
    merge5 = keras.layers.Concatenate()([conv3,up5]) 
    conv5 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(merge5) #128
    
    up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv5)) #256
    merge6 = keras.layers.Concatenate()([conv2,up6])
    conv6 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(merge6) #256
    
    up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6)) #512
    merge7 = keras.layers.Concatenate()([conv1,up7])
    conv7 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(merge7) #512
    

    conv1r = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(1, 1))(input) #512
  
    block1 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(conv1r) #512
    se1 = squeeze_excite_block(block1)
    gate1 = Activation('sigmoid')(conv7)
    block1concat = keras.layers.Multiply()([se1, gate1]) #512
    block1se = squeeze_excite_block(block1concat)
    block1b = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=False)(block1se) #256
    
    block2 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(block1b) #256
    se2 = squeeze_excite_block(block2)
    gate2 = Activation('sigmoid')(conv6)
    block2concat = keras.layers.Multiply()([se2, gate2]) #256
    block2se = squeeze_excite_block(block2concat)
    block2b = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=False)(block2se) #128

    block3 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(block2b) #128
    se3 = squeeze_excite_block(block3)
    gate3 = Activation('sigmoid')(conv5)
    block3concat = keras.layers.Multiply()([se3, gate3]) #128
    block3se = squeeze_excite_block(block3concat)
    block3b = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=False)(block3se) # 64

    block4 = _residual_block(basic_block, filters=512, repetitions=1, is_first_layer=True)(block3b) #64
    block4se = squeeze_excite_block(block4)
    block4b = _residual_block(basic_block, filters=512, repetitions=1, is_first_layer=False)(block4se) #32

    up2_5 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(block4b)) #64
    merge2_5 = keras.layers.Concatenate()([block3b,up2_5])
    conv2_5 = _residual_block(basic_block, filters=256, repetitions=1, is_first_layer=True)(merge2_5) #64
    out1 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (8,8))(conv2_5))
    out1 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(out1)

    up2_6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2_5)) #128
    merge2_6 = keras.layers.Concatenate()([block2b,up2_6])
    conv2_6 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(merge2_6) #128
    out2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (4,4))(conv2_6))
    out2 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(out2)
    
    up2_7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2_6)) #256
    merge2_7 = keras.layers.Concatenate()([block1b,up2_7])
    conv2_7 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(merge2_7) #256
    out3 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2_7))
    out3 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(out3)
         
    up2_8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv2_7)) #512
    merge2_8 = keras.layers.Concatenate()([conv1r,up2_8])
    conv2_8 = _residual_block(basic_block, filters=32, repetitions=1, is_first_layer=True)(merge2_8) #512
    conv2_8 = _residual_block(basic_block, filters=16, repetitions=1, is_first_layer=True)(conv2_8)
    conv2_8 = _residual_block(basic_block, filters=4, repetitions=1, is_first_layer=True)(conv2_8)
         
    out4 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(conv2_8)

    out_concat = keras.layers.Concatenate()([out1, out2, out3, out4])

    out_concat = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(out_concat)

    out = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(out_concat)
         
    model = Model(inputs=input, outputs=out)

    model.compile(optimizer = SGD(lr=0.0005, momentum=0.9, nesterov=True), loss = losses.focal_tversky, metrics = [losses.tp, losses.tn, losses.dsc, losses.jacard_coef, 'accuracy'])
         
    model.summary()

    return model
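losses.focal_tversky used in the compile call above is presumably the focal Tversky loss of Abraham & Khan (2018). A sketch of the usual formulation; the alpha and gamma defaults are assumptions:

import keras.backend as K

def tversky(y_true, y_pred, alpha=0.7, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    tp = K.sum(y_true_f * y_pred_f)
    fn = K.sum(y_true_f * (1. - y_pred_f))
    fp = K.sum((1. - y_true_f) * y_pred_f)
    # alpha > 0.5 weights false negatives more heavily than false positives
    return (tp + smooth) / (tp + alpha * fn + (1. - alpha) * fp + smooth)

def focal_tversky(y_true, y_pred, gamma=0.75):
    # raising the Tversky complement to gamma < 1 focuses training on hard examples
    return K.pow(1. - tversky(y_true, y_pred), gamma)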
Example #12
up9 = Conv2D(64, 2, activation='relu',  # opening of this call reconstructed from the parallel snippet in Example #14; the filter count is an assumption
             padding='same',
             kernel_initializer='he_normal',
             name='UpConv4')(UpSampling2D(size=(2, 2), name='Up4')(conv8))
merge9 = keras.layers.Concatenate(name='Concat4')([conv1, up9])
conv9 = Residual17(128, 64, merge9)
#conv10 = Residual18(64, 16, conv9)
#conv10 = Residual19(16, 1, conv10)
#conv11 = Conv2D(1, 1, activation = 'sigmoid', name='Output')(conv10)

conv1r = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(1, 1))(input)

block1 = _residual_block(basic_block,
                         filters=64,
                         repetitions=1,
                         is_first_layer=True)(conv1r)
se1 = squeeze_excite_block(block1)
gate1 = Activation('sigmoid')(conv9)
block1concat = keras.layers.Multiply()([se1, gate1])
block1se = squeeze_excite_block(block1concat)
block1conv1 = Conv2D(64, (1, 1),
                     padding='same',
                     kernel_initializer='he_normal')(block1se)
block1conv1 = BatchNormalization(axis=CHANNEL_AXIS)(block1conv1)
block1conv1 = layers.LeakyReLU()(block1conv1)
block1conv2 = Conv2D(64, (3, 3),
                     padding='same',
                     kernel_initializer='he_normal')(block1conv1)
block1conv2 = BatchNormalization(axis=CHANNEL_AXIS)(block1conv2)
block1conv2 = layers.LeakyReLU()(block1conv2)
block1conv3 = Conv2D(64, (1, 1),
                     padding='same',
                     kernel_initializer='he_normal')(block1conv2)  # closing arguments assumed to mirror block1conv1 above
Example #13
def _depthwise_conv_block(inputs,
                          pointwise_conv_filters,
                          alpha,
                          depth_multiplier=1,
                          strides=(1, 1),
                          block_id=1):
    """Adds a depthwise convolution block.
    A depthwise convolution block consists of a depthwise conv,
    batch normalization, relu6, pointwise convolution,
    batch normalization and relu6 activation.
    # Arguments
        inputs: Input tensor of shape `(rows, cols, channels)`
            (with `channels_last` data format) or
            `(channels, rows, cols)` (with `channels_first` data format).
        pointwise_conv_filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the pointwise convolution).
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel.
            The total number of depthwise convolution output
            channels will be equal to `filters_in * depth_multiplier`.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        block_id: Integer, a unique identification designating the block number.
    # Input shape
        4D tensor with shape:
        `(batch, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, rows, cols, channels)` if data_format='channels_last'.
    # Output shape
        4D tensor with shape:
        `(batch, filters, new_rows, new_cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.
    # Returns
        Output tensor of block.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id)(inputs)
    x = BatchNormalization(axis=channel_axis,
                           name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis,
                           name='conv_pw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)
    return x
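relu6 used above is the clipped activation from the Keras MobileNet implementation; under that assumption:

from keras import backend as K

def relu6(x):
    # ReLU capped at 6, as used by MobileNet
    return K.relu(x, max_value=6)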
Example #14
conv8 = Residual15(96, 32, merge8)
conv8_1 = Residual16(32, 16, conv8)
    
up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', name='UpConv4')(UpSampling2D(size = (2,2), name='Up4')(conv8_1))
merge9 = keras.layers.Concatenate(name='Concat4')([conv1,up9])
conv9 = Residual17(48, 16, merge9)
conv10 = Residual18(16, 2, conv9)
conv10 = Residual19(2, 1, conv10)
conv11 = Conv2D(1, 1, activation = 'sigmoid', name='Output')(conv10)
    

conv1r = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(1, 1))(input)
  
block1 = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=True)(conv1r)
block1concat = keras.layers.Concatenate()([block1, conv9])
block1se = squeeze_excite_block(block1concat)
block1conv1 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = 'he_normal')(block1se)
block1conv1 = BatchNormalization(axis=CHANNEL_AXIS)(block1conv1)
block1conv1 = layers.LeakyReLU()(block1conv1)
block1conv2 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = 'he_normal')(block1conv1)
block1conv2 = BatchNormalization(axis=CHANNEL_AXIS)(block1conv2)
block1conv2 = layers.LeakyReLU()(block1conv2)
block1b = _residual_block(basic_block, filters=64, repetitions=1, is_first_layer=False)(block1conv2)
    
block2 = _residual_block(basic_block, filters=128, repetitions=1, is_first_layer=True)(block1b)
block2concat = keras.layers.Concatenate()([block2, conv8])
block2se = squeeze_excite_block(block2concat)
block2conv1 = Conv2D(128, (3,3), padding = 'same', kernel_initializer = 'he_normal')(block2se)
block2conv1 = BatchNormalization(axis=CHANNEL_AXIS)(block2conv1)
block2conv1 = layers.LeakyReLU()(block2conv1)
block2conv2 = Conv2D(128, (3,3), padding = 'same', kernel_initializer = 'he_normal')(block2conv1)
Example #15
def wnet_model(dataset,
               n_classes=5,
               im_sz=160,
               n_channels=3,
               n_filters_start=32,
               growth_factor=2):
    droprate = 0.25

    #-------------Encoder
    #Block1
    n_filters = n_filters_start
    inputs = Input((im_sz, im_sz, 1), name='input')
    conv1 = Conv2D(n_filters, (3, 3), padding='same', name='conv1_1')(inputs)
    actv1 = LeakyReLU(name='actv1_1')(conv1)
    conv1 = Conv2D(n_filters, (3, 3), padding='same', name='conv1_2')(actv1)
    actv1 = LeakyReLU(name='actv1_2')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), name='maxpool1')(actv1)
    pool1 = squeeze_excite_block(pool1)
    #pool1 = channel_spatial_squeeze_excite(pool1)

    #Block2
    n_filters *= growth_factor
    pool1 = BatchNormalization(name='bn1')(pool1)
    conv2 = Conv2D(n_filters, (3, 3), padding='same', name='conv2_1')(pool1)
    actv2 = LeakyReLU(name='actv2_1')(conv2)
    conv2 = Conv2D(n_filters, (3, 3), padding='same', name='conv2_2')(actv2)
    actv2 = LeakyReLU(name='actv2_2')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), name='maxpool2')(actv2)
    #pool2 = channel_spatial_squeeze_excite(pool2)
    pool2 = Dropout(droprate, name='dropout2')(pool2)
    pool2 = squeeze_excite_block(pool2)

    #Block3
    n_filters *= growth_factor
    pool2 = BatchNormalization(name='bn2')(pool2)
    conv3 = Conv2D(n_filters, (3, 3), padding='same', name='conv3_1')(pool2)
    actv3 = LeakyReLU(name='actv3_1')(conv3)
    conv3 = Conv2D(n_filters, (3, 3), padding='same', name='conv3_2')(actv3)
    actv3 = LeakyReLU(name='actv3_2')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), name='maxpool3')(actv3)
    #pool3 = channel_spatial_squeeze_excite(pool3)
    pool3 = Dropout(droprate, name='dropout3')(pool3)
    pool3 = squeeze_excite_block(pool3)

    #Block4
    n_filters *= growth_factor
    pool3 = BatchNormalization(name='bn3')(pool3)
    conv4_0 = Conv2D(n_filters, (3, 3), padding='same', name='conv4_1')(pool3)
    actv4_0 = LeakyReLU(name='actv4_1')(conv4_0)
    conv4_0 = Conv2D(n_filters, (3, 3), padding='same',
                     name='conv4_0_2')(actv4_0)
    actv4_0 = LeakyReLU(name='actv4_2')(conv4_0)
    pool4_1 = MaxPooling2D(pool_size=(2, 2), name='maxpool4')(actv4_0)
    #pool4_1 = channel_spatial_squeeze_excite(pool4_1)
    pool4_1 = Dropout(droprate, name='dropout4')(pool4_1)
    pool4_1 = squeeze_excite_block(pool4_1)

    #Block5
    n_filters *= growth_factor
    pool4_1 = BatchNormalization(name='bn4')(pool4_1)
    conv4_1 = Conv2D(n_filters, (3, 3), padding='same',
                     name='conv5_1')(pool4_1)
    actv4_1 = LeakyReLU(name='actv5_1')(conv4_1)
    conv4_1 = Conv2D(n_filters, (3, 3), padding='same',
                     name='conv5_2')(actv4_1)
    actv4_1 = LeakyReLU(name='actv5_2')(conv4_1)
    pool4_2 = MaxPooling2D(pool_size=(2, 2), name='maxpool5')(actv4_1)
    #pool4_2 = channel_spatial_squeeze_excite(pool4_2)
    pool4_2 = Dropout(droprate, name='dropout5')(pool4_2)
    pool4_2 = squeeze_excite_block(pool4_2)

    #Block6
    n_filters *= growth_factor
    conv5 = Conv2D(n_filters, (3, 3), padding='same', name='conv6_1')(pool4_2)
    actv5 = LeakyReLU(name='actv6_1')(conv5)
    conv5 = Conv2D(n_filters, (3, 3), padding='same', name='conv6_2')(actv5)
    actv5 = LeakyReLU(name='actv6_2')(conv5)

    #-------------Decoder
    #Block7
    n_filters //= growth_factor
    up6_1 = concatenate([
        Conv2DTranspose(
            n_filters,
            (2, 2), strides=(2, 2), padding='same', name='up7')(actv5), actv4_1
    ],
                        name='concat7')
    up6_1 = BatchNormalization(name='bn7')(up6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), padding='same', name='conv7_1')(up6_1)
    actv6_1 = LeakyReLU(name='actv7_1')(conv6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), padding='same',
                     name='conv7_2')(actv6_1)
    actv6_1 = LeakyReLU(name='actv7_2')(conv6_1)
    #conv6_1 = channel_spatial_squeeze_excite(actv6_1)
    conv6_1 = Dropout(droprate, name='dropout7')(actv6_1)
    conv6_1 = squeeze_excite_block(conv6_1)

    #Block8
    n_filters //= growth_factor
    up6_2 = concatenate([
        Conv2DTranspose(
            n_filters, (2, 2), strides=(2, 2), padding='same',
            name='up8')(conv6_1), actv4_0
    ],
                        name='concat8')
    up6_2 = BatchNormalization(name='bn8')(up6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), padding='same', name='conv8_1')(up6_2)
    actv6_2 = LeakyReLU(name='actv8_1')(conv6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), padding='same',
                     name='conv8_2')(actv6_2)
    actv6_2 = LeakyReLU(name='actv8_2')(conv6_2)
    #conv6_2 = channel_spatial_squeeze_excite(actv6_2)
    conv6_2 = Dropout(droprate, name='dropout8')(actv6_2)
    conv6_2 = squeeze_excite_block(conv6_2)

    #Block9
    n_filters //= growth_factor
    up7 = concatenate([
        Conv2DTranspose(
            n_filters,
            (2, 2), strides=(2, 2), padding='same', name='up9')(conv6_2), actv3
    ],
                      name='concat9')
    up7 = BatchNormalization(name='bn9')(up7)
    conv7 = Conv2D(n_filters, (3, 3), padding='same', name='conv9_1')(up7)
    actv7 = LeakyReLU(name='actv9_1')(conv7)
    conv7 = Conv2D(n_filters, (3, 3), padding='same', name='conv9_2')(actv7)
    actv7 = LeakyReLU(name='actv9_2')(conv7)
    #conv7 = channel_spatial_squeeze_excite(actv7)
    conv7 = Dropout(droprate, name='dropout9')(actv7)
    conv7 = squeeze_excite_block(conv7)

    #Block10
    n_filters //= growth_factor
    up8 = concatenate([
        Conv2DTranspose(
            n_filters,
            (2, 2), strides=(2, 2), padding='same', name='up10')(conv7), actv2
    ],
                      name='concat10')
    up8 = BatchNormalization(name='bn10')(up8)
    conv8 = Conv2D(n_filters, (3, 3), padding='same', name='conv10_1')(up8)
    actv8 = LeakyReLU(name='actv10_1')(conv8)
    conv8 = Conv2D(n_filters, (3, 3), padding='same', name='conv10_2')(actv8)
    actv8 = LeakyReLU(name='actv10_2')(conv8)
    #conv8 = channel_spatial_squeeze_excite(conv8)
    conv8 = Dropout(droprate, name='dropout10')(actv8)
    conv8 = squeeze_excite_block(conv8)

    #Block11
    n_filters //= growth_factor
    up9 = concatenate([
        Conv2DTranspose(
            n_filters,
            (2, 2), strides=(2, 2), padding='same', name='up11')(conv8), actv1
    ],
                      name='concat11')
    conv9 = Conv2D(n_filters, (3, 3), padding='same', name='conv11_1')(up9)
    actv9 = LeakyReLU(name='actv11_1')(conv9)
    conv9 = Conv2D(n_filters, (3, 3), padding='same', name='conv11_2')(actv9)
    actv9 = LeakyReLU(name='actv11_2')(conv9)
    #actv9 = channel_spatial_squeeze_excite(actv9)
    actv9 = squeeze_excite_block(actv9)

    output1 = Conv2D(n_classes, (1, 1), activation='softmax',
                     name='output1')(actv9)

    #-------------Second UNet
    #-------------Encoder
    #Block12
    conv10 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(output1)
    actv10 = LeakyReLU()(conv10)
    conv10 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(actv10)
    actv10 = LeakyReLU()(conv10)
    pool10 = MaxPooling2D(pool_size=(2, 2))(actv10)

    #Block13
    n_filters *= growth_factor
    pool10 = BatchNormalization()(pool10)
    #Bridge
    pool10 = concatenate([pool10, conv8])
    conv11 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(pool10)
    actv11 = LeakyReLU()(conv11)
    conv11 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(actv11)
    actv11 = LeakyReLU()(conv11)
    pool11 = MaxPooling2D(pool_size=(2, 2))(actv11)
    pool11 = Dropout(droprate)(pool11)

    #Block14
    n_filters *= growth_factor
    pool11 = BatchNormalization()(pool11)
    pool11 = concatenate([pool11, conv7])
    conv12 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(pool11)
    actv12 = LeakyReLU()(conv12)
    conv12 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(actv12)
    actv12 = LeakyReLU()(conv12)
    pool12 = MaxPooling2D(pool_size=(2, 2))(actv12)
    pool12 = Dropout(droprate)(pool12)

    #Block15
    n_filters *= growth_factor
    pool12 = BatchNormalization()(pool12)
    pool12 = concatenate([pool12, conv6_2])
    conv13_0 = Conv2D(n_filters, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')(pool12)
    actv13_0 = LeakyReLU()(conv13_0)
    conv13_0 = Conv2D(n_filters, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')(actv13_0)
    actv13_0 = LeakyReLU()(conv13_0)
    pool13_1 = MaxPooling2D(pool_size=(2, 2))(actv13_0)
    pool13_1 = Dropout(droprate)(pool13_1)

    #Block16
    n_filters *= growth_factor
    pool13_1 = BatchNormalization()(pool13_1)
    pool13_1 = concatenate([pool13_1, conv6_1])
    conv13_1 = Conv2D(n_filters, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')(pool13_1)
    actv13_1 = LeakyReLU()(conv13_1)
    conv13_1 = Conv2D(n_filters, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')(actv13_1)
    actv13_1 = LeakyReLU()(conv13_1)
    pool13_2 = MaxPooling2D(pool_size=(2, 2))(actv13_1)
    pool13_2 = Dropout(droprate)(pool13_2)

    #Block17
    n_filters *= growth_factor
    pool13_2 = concatenate([pool13_2, actv5])
    conv14 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(pool13_2)
    actv14 = LeakyReLU()(conv14)
    conv14 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(actv14)
    actv14 = LeakyReLU()(conv14)

    #-------------Decoder
    #Block18
    n_filters //= growth_factor
    #Skip
    up15_1 = concatenate([
        Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                        padding='same')(actv14),
        Add()([actv13_1, actv4_1])
    ])

    up15_1 = BatchNormalization()(up15_1)
    conv15_1 = Conv2D(n_filters, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')(up15_1)
    actv15_1 = LeakyReLU()(conv15_1)
    conv15_1 = Conv2D(n_filters, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')(actv15_1)
    actv15_1 = LeakyReLU()(conv15_1)
    conv15_1 = Dropout(droprate)(actv15_1)

    #Block19
    n_filters //= growth_factor
    #Skip
    up15_2 = concatenate([
        Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                        padding='same')(conv15_1),
        Add()([actv13_0, actv4_0])
    ])
    up15_2 = BatchNormalization()(up15_2)
    conv15_2 = Conv2D(n_filters, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')(up15_2)
    actv15_2 = LeakyReLU()(conv15_2)
    conv15_2 = Conv2D(n_filters, (3, 3),
                      padding='same',
                      kernel_initializer='he_uniform',
                      bias_initializer='he_uniform')(actv15_2)
    actv15_2 = LeakyReLU()(conv15_2)
    conv15_2 = Dropout(droprate)(actv15_2)

    #Block20
    n_filters //= growth_factor
    #Skip
    up16 = concatenate([
        Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                        padding='same')(conv15_2),
        Add()([actv12, actv3])
    ])
    up16 = BatchNormalization()(up16)
    conv16 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(up16)
    actv16 = LeakyReLU()(conv16)
    conv16 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(actv16)
    actv16 = LeakyReLU()(conv16)
    conv16 = Dropout(droprate)(actv16)

    #Block21
    n_filters //= growth_factor
    #Skip
    up17 = concatenate([
        Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                        padding='same')(conv16),
        Add()([actv11, actv2])
    ])
    up17 = BatchNormalization()(up17)
    conv17 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(up17)
    actv17 = LeakyReLU()(conv17)
    conv17 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(actv17)
    actv17 = LeakyReLU()(conv17)
    conv17 = Dropout(droprate)(actv17)

    #Block22
    n_filters //= growth_factor
    #Skip
    up18 = concatenate([
        Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                        padding='same')(conv17),
        Add()([actv10, actv1])
    ])
    conv18 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(up18)
    actv18 = LeakyReLU()(conv18)
    conv18 = Conv2D(n_filters, (3, 3),
                    padding='same',
                    kernel_initializer='he_uniform',
                    bias_initializer='he_uniform')(actv18)
    actv18 = LeakyReLU()(conv18)

    conv19 = Conv2D(n_channels, (1, 1), activation='sigmoid',
                    name='output2')(actv18)

    output2 = conv19

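    # Two heads: output1 (built earlier in the function) is trained with the
    # Dice + binary cross-entropy objective below, while output2 reconstructs
    # the n_channels input under mean squared error.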
    model = Model(inputs=inputs, outputs=[output1, output2])

    def mean_squared_error(y_true, y_pred):
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        return K.mean(K.square(y_pred_f - y_true_f))

    def keras_lovasz_softmax(y_true, y_pred):
        return lovasz_softmax(y_pred, y_true)

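    # The segmentation head is trained with a multilabel Dice term plus
    # binary cross-entropy.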
    def custom_loss(y_true, y_pred):
        return dice_coef_multilabel(
            y_true, y_pred) + losses.binary_crossentropy(y_true, y_pred)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss=[custom_loss, mean_squared_error],
                  loss_weights=[0.95, 0.05],
                  metrics=["accuracy"])
    return model
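`custom_loss` above relies on `dice_coef_multilabel`, and `keras_lovasz_softmax` wraps a `lovasz_softmax` helper; neither is shown in this excerpt. A minimal sketch of a multilabel soft-Dice loss consistent with how `custom_loss` uses it (the smoothing constant, channels-last layout, and per-channel summation are assumptions, not the author's exact code):

def dice_coef_multilabel(y_true, y_pred, smooth=1.0):
    # Sum of per-channel soft-Dice losses; assumes channels-last masks.
    loss = 0.0
    for ch in range(K.int_shape(y_pred)[-1]):
        y_true_f = K.flatten(y_true[..., ch])
        y_pred_f = K.flatten(y_pred[..., ch])
        intersection = K.sum(y_true_f * y_pred_f)
        dice = (2. * intersection + smooth) / (
            K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
        loss += 1. - dice
    return loss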
Example #16
0
def SEInceptionResnetCustom():
    # Determine proper input shape
    input_shape = _obtain_input_shape((IMAGE_SIZE, IMAGE_SIZE, 3),
                                      default_size=96,
                                      min_size=96,
                                      data_format=K.image_data_format(),
                                      require_flatten=False)

    img_input = Input(shape=input_shape)

    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input,
                  32,
                  3,
                  strides=2,
                  padding='valid',
                  activation='elu')
    x = conv2d_bn(x, 32, 3, padding='valid', activation='elu')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid', activation='elu')
    x = conv2d_bn(x, 192, 3, padding='valid', activation='elu')
    x = MaxPooling2D(3, strides=2)(x)

    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1, activation='elu')
    branch_1 = conv2d_bn(x, 48, 1, activation='elu')
    branch_1 = conv2d_bn(branch_1, 64, 5, activation='elu')
    branch_2 = conv2d_bn(x, 64, 1, activation='elu')
    branch_2 = conv2d_bn(branch_2, 96, 3, activation='elu')
    branch_2 = conv2d_bn(branch_2, 96, 3, activation='elu')
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, activation='elu')
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x,
                         384,
                         3,
                         strides=2,
                         padding='valid',
                         activation='elu')
    branch_1 = conv2d_bn(x, 256, 1, activation='elu')
    branch_1 = conv2d_bn(branch_1, 256, 3, activation='elu')
    branch_1 = conv2d_bn(branch_1,
                         384,
                         3,
                         strides=2,
                         padding='valid',
                         activation='elu')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # Final 1x1 convolution block (816 filters)
    x = conv2d_bn(x, 816, 1, name='conv_7b')
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)
    model = Model(inputs=img_input,
                  outputs=x,
                  name='se_inception_resnet_v2_custom')

    return model
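Every example in this file routes feature maps through `squeeze_excite_block`, which is not shown in this excerpt. A minimal sketch of the standard squeeze-and-excitation block (Hu et al.) consistent with how it is called here; the reduction ratio, the initializers, and the usual `keras.layers` imports (`GlobalAveragePooling2D`, `Reshape`, `Dense`, `Permute`, `multiply`) are assumptions:

def squeeze_excite_block(input_tensor, ratio=16):
    # Squeeze: global average pooling to one descriptor per channel.
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    filters = K.int_shape(input_tensor)[channel_axis]
    se = GlobalAveragePooling2D()(input_tensor)
    se = Reshape((1, 1, filters))(se)
    # Excite: bottleneck MLP ending in a per-channel sigmoid gate.
    se = Dense(filters // ratio, activation='relu',
               kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid',
               kernel_initializer='he_normal', use_bias=False)(se)
    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)
    # Rescale the input feature map channel-wise.
    return multiply([input_tensor, se])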
Example #17
0
def get_unet():

    inputs = Input((256, 256, 3))

    conv1 = initial_conv_block(inputs)  #512
    pool1 = _residual_block(basic_block,
                            filters=32,
                            repetitions=1,
                            is_first_layer=False)(conv1)  #256

    conv2 = _residual_block(basic_block,
                            filters=64,
                            repetitions=1,
                            is_first_layer=True)(pool1)  #256
    pool2 = _residual_block(basic_block,
                            filters=64,
                            repetitions=1,
                            is_first_layer=False)(conv2)  #128

    conv3 = _residual_block(basic_block,
                            filters=128,
                            repetitions=1,
                            is_first_layer=True)(pool2)  #128
    pool3 = _residual_block(basic_block,
                            filters=128,
                            repetitions=1,
                            is_first_layer=False)(conv3)  #64

    conv4 = _residual_block(basic_block,
                            filters=256,
                            repetitions=1,
                            is_first_layer=True)(pool3)  #64
    drop4 = Dropout(0.2)(conv4)

    up5 = Conv2D(256,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(
                     UpSampling2D(size=(2, 2))(drop4))  #128
    merge5 = keras.layers.Concatenate()([conv3, up5])
    conv5 = _residual_block(basic_block,
                            filters=256,
                            repetitions=1,
                            is_first_layer=True)(merge5)  #128

    up6 = Conv2D(128,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(
                     UpSampling2D(size=(2, 2))(conv5))  #256
    merge6 = keras.layers.Concatenate()([conv2, up6])
    conv6 = _residual_block(basic_block,
                            filters=128,
                            repetitions=1,
                            is_first_layer=True)(merge6)  #256

    up7 = Conv2D(64,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(
                     UpSampling2D(size=(2, 2))(conv6))  #512
    merge7 = keras.layers.Concatenate()([conv1, up7])
    conv7 = _residual_block(basic_block,
                            filters=64,
                            repetitions=1,
                            is_first_layer=True)(merge7)  #512

    conv1r = _conv_bn_relu(filters=64, kernel_size=(7, 7),
                           strides=(1, 1))(inputs)  #512

    block1 = _residual_block(basic_block,
                             filters=64,
                             repetitions=1,
                             is_first_layer=True)(conv1r)  #512
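    # Attention gate: SE-recalibrated encoder features are modulated by a
    # sigmoid map derived from the first decoder's output (conv7).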
    se1 = squeeze_excite_block(block1)
    gate1 = Activation('sigmoid')(conv7)
    block1concat = keras.layers.Multiply()([se1, gate1])  #512
    block1se = squeeze_excite_block(block1concat)
    block1b = _residual_block(basic_block,
                              filters=64,
                              repetitions=1,
                              is_first_layer=False)(block1se)  #256

    block2 = _residual_block(basic_block,
                             filters=128,
                             repetitions=1,
                             is_first_layer=True)(block1b)  #256
    se2 = squeeze_excite_block(block2)
    gate2 = Activation('sigmoid')(conv6)
    block2concat = keras.layers.Multiply()([se2, gate2])  #256
    block2se = squeeze_excite_block(block2concat)
    block2b = _residual_block(basic_block,
                              filters=128,
                              repetitions=1,
                              is_first_layer=False)(block2se)  #128

    block3 = _residual_block(basic_block,
                             filters=256,
                             repetitions=1,
                             is_first_layer=True)(block2b)  #128
    se3 = squeeze_excite_block(block3)
    gate3 = Activation('sigmoid')(conv5)
    block3concat = keras.layers.Multiply()([se3, gate3])  #128
    block3se = squeeze_excite_block(block3concat)
    block3b = _residual_block(basic_block,
                              filters=256,
                              repetitions=1,
                              is_first_layer=False)(block3se)  # 64

    block4 = _residual_block(basic_block,
                             filters=512,
                             repetitions=1,
                             is_first_layer=True)(block3b)  #64
    block4se = squeeze_excite_block(block4)
    block4b = _residual_block(basic_block,
                              filters=512,
                              repetitions=1,
                              is_first_layer=False)(block4se)  #32

    up2_5 = Conv2D(256,
                   2,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(
                       UpSampling2D(size=(2, 2))(block4b))  #64
    merge2_5 = keras.layers.Concatenate()([block3b, up2_5])
    conv2_5 = _residual_block(basic_block,
                              filters=256,
                              repetitions=1,
                              is_first_layer=True)(merge2_5)  #64

    up2_6 = Conv2D(128,
                   2,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(
                       UpSampling2D(size=(2, 2))(conv2_5))  #128
    merge2_6 = keras.layers.Concatenate()([block2b, up2_6])
    conv2_6 = _residual_block(basic_block,
                              filters=128,
                              repetitions=1,
                              is_first_layer=True)(merge2_6)  #128

    up2_7 = Conv2D(64,
                   2,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(
                       UpSampling2D(size=(2, 2))(conv2_6))  #256
    merge2_7 = keras.layers.Concatenate()([block1b, up2_7])
    conv2_7 = _residual_block(basic_block,
                              filters=64,
                              repetitions=1,
                              is_first_layer=True)(merge2_7)  #256

    up2_8 = Conv2D(32,
                   2,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(
                       UpSampling2D(size=(2, 2))(conv2_7))  #512
    merge2_8 = keras.layers.Concatenate()([conv1r, up2_8])
    conv2_8 = _residual_block(basic_block,
                              filters=32,
                              repetitions=1,
                              is_first_layer=True)(merge2_8)  #512
    conv2_8 = _residual_block(basic_block,
                              filters=16,
                              repetitions=1,
                              is_first_layer=True)(conv2_8)
    conv2_8 = _residual_block(basic_block,
                              filters=4,
                              repetitions=1,
                              is_first_layer=True)(conv2_8)

    out = Conv2D(1,
                 1,
                 activation='sigmoid',
                 padding='same',
                 kernel_initializer='he_normal')(conv2_8)

    model = Model(inputs=inputs, outputs=out)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss=dice_coef_loss,
                  metrics=[dice_coef, jaccard_coef, 'acc'])

    model.summary()

    return model
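Example #17 compiles with `dice_coef_loss`, `dice_coef` and `jaccard_coef`, none of which appear in this excerpt. A minimal sketch of the soft-Dice pair consistent with that compile call (the smoothing constant is an assumption):

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient over the flattened masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (
        K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Loss form: maximize the Dice coefficient.
    return 1. - dice_coef(y_true, y_pred)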
Example #18
0
    conv10 = Residual18(16, 2, conv9)
    conv10 = Residual19(2, 1, conv10)
    conv11 = Conv2D(1, 1, activation='sigmoid', name='Output')(conv10)

with tf.device('/device:GPU:3'):
    init = initial_conv_block(input, weight_decay=5e-4)

    x1 = Conv2D(64, (3, 3), padding='same',
                kernel_initializer='he_normal')(init)
    x1 = BatchNormalization()(x1)
    x1 = layers.LeakyReLU()(x1)
    x1concat = keras.layers.Concatenate()([x1, conv9])  #192x192x80
    x1se = squeeze_excite_block(x1concat)
    x1conv1 = SeparableConv2D(80, (1, 1),
                              padding='same',
                              kernel_initializer='he_normal')(x1se)
    x1conv1 = layers.LeakyReLU()(x1conv1)
    x1conv2 = Conv2D(64, (1, 1),
                     padding='same',
                     kernel_initializer='he_normal')(x1conv1)
    x1conv2 = layers.LeakyReLU()(x1conv2)
    x1pool = MaxPooling2D(pool_size=(2, 2))(x1conv2)

    x2 = Conv2D(96, (3, 3), padding='same',
                kernel_initializer='he_normal')(x1pool)
Example #19
0
def SEInceptionV3(include_top=True,
                  weights=None,
                  input_tensor=None,
                  input_shape=None,
                  pooling=None,
                  classes=1000):
    """Instantiates the Squeeze and Excite Inception v3 architecture.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)` (with `channels_last` data format)
            or `(3, 299, 299)` (with `channels_first` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 139.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=139,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = _conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = _conv2d_bn(x, 32, 3, 3, padding='valid')
    x = _conv2d_bn(x, 64, 3, 3)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = _conv2d_bn(x, 80, 1, 1, padding='valid')
    x = _conv2d_bn(x, 192, 3, 3, padding='valid')
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0, 1, 2: 35 x 35 x 256
    branch1x1 = _conv2d_bn(x, 64, 1, 1)

    branch5x5 = _conv2d_bn(x, 48, 1, 1)
    branch5x5 = _conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = _conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = _conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = _conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = _conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed0')

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # mixed 1: 35 x 35 x 256
    branch1x1 = _conv2d_bn(x, 64, 1, 1)

    branch5x5 = _conv2d_bn(x, 48, 1, 1)
    branch5x5 = _conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = _conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = _conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = _conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = _conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed1')

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # mixed 2: 35 x 35 x 256
    branch1x1 = _conv2d_bn(x, 64, 1, 1)

    branch5x5 = _conv2d_bn(x, 48, 1, 1)
    branch5x5 = _conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = _conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = _conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = _conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = _conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed2')

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # mixed 3: 17 x 17 x 768
    branch3x3 = _conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

    branch3x3dbl = _conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = _conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = _conv2d_bn(branch3x3dbl,
                              96,
                              3,
                              3,
                              strides=(2, 2),
                              padding='valid')

    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed3')

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # mixed 4: 17 x 17 x 768
    branch1x1 = _conv2d_bn(x, 192, 1, 1)

    branch7x7 = _conv2d_bn(x, 128, 1, 1)
    branch7x7 = _conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = _conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = _conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = _conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = _conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = _conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = _conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = _conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed4')

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = _conv2d_bn(x, 192, 1, 1)

        branch7x7 = _conv2d_bn(x, 160, 1, 1)
        branch7x7 = _conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = _conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = _conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = _conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = _conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = _conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = _conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = _conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(5 + i))

        # squeeze and excite block
        x = squeeze_excite_block(x)

    # mixed 7: 17 x 17 x 768
    branch1x1 = _conv2d_bn(x, 192, 1, 1)

    branch7x7 = _conv2d_bn(x, 192, 1, 1)
    branch7x7 = _conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = _conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = _conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = _conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = _conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = _conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = _conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = _conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed7')

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # mixed 8: 8 x 8 x 1280
    branch3x3 = _conv2d_bn(x, 192, 1, 1)
    branch3x3 = _conv2d_bn(branch3x3,
                           320,
                           3,
                           3,
                           strides=(2, 2),
                           padding='valid')

    branch7x7x3 = _conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = _conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = _conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = _conv2d_bn(branch7x7x3,
                             192,
                             3,
                             3,
                             strides=(2, 2),
                             padding='valid')

    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool],
                           axis=channel_axis,
                           name='mixed8')

    # squeeze and excite block
    x = squeeze_excite_block(x)

    # mixed 9: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = _conv2d_bn(x, 320, 1, 1)

        branch3x3 = _conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = _conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = _conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2],
                                       axis=channel_axis,
                                       name='mixed9_' + str(i))

        branch3x3dbl = _conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = _conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = _conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = _conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2],
                                          axis=channel_axis)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = _conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(9 + i))

        # squeeze and excite block
        x = squeeze_excite_block(x)

    if include_top:
        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='se_inception_v3')

    return model
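A usage sketch for the builder above, with the input shape taken from its own docstring (weights are left at `None`, so this is random initialization):

# Feature extractor: the 4D conv output is pooled to a 2D tensor.
model = SEInceptionV3(include_top=False,
                      input_shape=(150, 150, 3),
                      pooling='avg')
model.summary()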
Example #20
0
metrics = ['accuracy']
'''Model Construction'''
base_model = Xception(weights='imagenet',
                      input_tensor=layers.Input(shape=(image_size[0],
                                                       image_size[1], 3)),
                      include_top=False)

#last convolution layer
base_out = base_model.output

dims = base_out.shape.as_list()[1:]
feat_dim = dims[2] * pool_size * pool_size
base_channels = dims[2]

x = base_out
x = squeeze_excite_block(x)  #Added new

#self-attention
x_f = ConvSN2D(base_channels // 8, kernel_size=1, strides=1,
               padding='same')(x)  # [bs, h, w, c']
x_g = ConvSN2D(base_channels // 8, kernel_size=1, strides=1,
               padding='same')(x)  # [bs, h, w, c']
x_h = ConvSN2D(base_channels, kernel_size=1, strides=1, padding='same')(x)
x_final = SelfAttention(filters=base_channels)([x, x_f, x_g, x_h])
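# The three spectrally-normalized 1x1 convolutions above match the SAGAN-style
# self-attention pattern: x_f and x_g are the reduced query/key projections
# (base_channels // 8 filters) and x_h is the value projection; the custom
# SelfAttention layer combines them with the input feature map x.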

#x_final = base_out

full_img = layers.Lambda(
    lambda x: K.tf.image.resize_images(
        x, size=(ROIS_resolution, ROIS_resolution)),
    name='Lambda_img_1'