Example #1
def WideResidualNetwork(depth=28,
                        width=8,
                        dropout_rate=0.0,
                        include_top=True,
                        weights='cifar10',
                        input_tensor=None,
                        input_shape=None,
                        classes=10,
                        activation='softmax'):
    """Instantiate the Wide Residual Network architecture,
        optionally loading weights pre-trained
        on CIFAR-10. Note that when using TensorFlow,
        for best performance you should set
        `image_dim_ordering="tf"` in your Keras config
        at ~/.keras/keras.json.

        The model and the weights are compatible with both
        TensorFlow and Theano. The dimension ordering
        convention used by the model is the one
        specified in your Keras config file.

        # Arguments
            depth: number of layers in the Wide Residual Network
            width: multiplier to the ResNet width (number of filters)
            dropout_rate: dropout rate
            include_top: whether to include the fully-connected
                layer at the top of the network.
            weights: one of `None` (random initialization) or
                "cifar10" (pre-training on CIFAR-10)..
            input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
                to use as image input for the model.
            input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape
                has to be `(32, 32, 3)` with `tf` dim ordering
                or `(3, 32, 32)` with `th` dim ordering).
                It should have exactly 3 input channels,
                and width and height should be no smaller than 8.
                E.g. `(200, 200, 3)` would be one valid value.
            classes: optional number of classes to classify images
                into, only to be specified if `include_top` is True, and
                if no `weights` argument is specified.
            activation: type of activation at the top layer.
                Can be one of 'softmax' or 'sigmoid'.

        # Returns
            A Keras model instance.
        """

    if weights not in {'cifar10', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `cifar10` '
                         '(pre-training on CIFAR-10).')

    if weights == 'cifar10' and include_top and classes != 10:
        raise ValueError('If using `weights` as CIFAR-10 with `include_top` '
                         'as True, `classes` should be 10.')

    if (depth - 4) % 6 != 0:
        raise ValueError('Depth of the network must be such that (depth - 4) '
                         'should be divisible by 6.')
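    # For reference: depth = 6 * N + 4, so the default depth=28 gives N = 4
    # residual blocks in each of the three groups (the WRN-28-8 configuration).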

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=8,
                                      data_format=K.image_dim_ordering(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = __create_wide_residual_network(classes, img_input, include_top, depth,
                                       width, dropout_rate, activation)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='wide-resnet')

    # load weights
    if weights == 'cifar10':
        if (depth == 28) and (width == 8) and (dropout_rate == 0.0):
            # Default parameters match. Weights for this model exist:

            if K.image_dim_ordering() == 'th':
                if include_top:
                    h5_file = 'wide_resnet_28_8_th_dim_ordering_th_kernels.h5'
                    weights_path = get_file(h5_file,
                                            TH_WEIGHTS_PATH,
                                            cache_subdir='models')
                else:
                    h5_file = 'wide_resnet_28_8_th_dim_ordering_th_kernels_no_top.h5'
                    weights_path = get_file(h5_file,
                                            TH_WEIGHTS_PATH_NO_TOP,
                                            cache_subdir='models')

                model.load_weights(weights_path)

                if K.backend() == 'tensorflow':
                    warnings.warn(
                        'You are using the TensorFlow backend, yet you '
                        'are using the Theano '
                        'image dimension ordering convention '
                        '(`image_dim_ordering="th"`). '
                        'For best performance, set '
                        '`image_dim_ordering="tf"` in '
                        'your Keras config '
                        'at ~/.keras/keras.json.')
                    convert_all_kernels_in_model(model)
            else:
                if include_top:
                    h5_file = 'wide_resnet_28_8_tf_dim_ordering_tf_kernels.h5'
                    weights_path = get_file(h5_file,
                                            TF_WEIGHTS_PATH,
                                            cache_subdir='models')
                else:
                    h5_file = 'wide_resnet_28_8_tf_dim_ordering_tf_kernels_no_top.h5'
                    weights_path = get_file(h5_file,
                                            TF_WEIGHTS_PATH_NO_TOP,
                                            cache_subdir='models')

                model.load_weights(weights_path)

                if K.backend() == 'theano':
                    convert_all_kernels_in_model(model)

    return model
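
A minimal usage sketch (not part of the original source): it builds a randomly
initialized WRN-16-4 and runs a dummy CIFAR-sized batch through it, assuming
the TensorFlow backend with `tf` dim ordering. The import path `wide_resnet`
is hypothetical.

import numpy as np
from wide_resnet import WideResidualNetwork  # hypothetical module path

# depth=16 satisfies the (depth - 4) % 6 == 0 constraint checked above.
model = WideResidualNetwork(depth=16, width=4, weights=None)

# `tf` dim ordering: (batch, height, width, channels).
batch = np.random.rand(2, 32, 32, 3).astype('float32')
print(model.predict(batch).shape)  # (2, 10)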
Example #2
def NASNet(input_shape=None,
           penultimate_filters=4032,
           nb_blocks=6,
           stem_filters=96,
           initial_reduction=True,
           skip_reduction_layer_input=True,
           use_auxiliary_branch=False,
           filters_multiplier=2,
           dropout=0.5,
           weight_decay=5e-5,
           include_top=True,
           weights=None,
           input_tensor=None,
           pooling=None,
           classes=1000,
           default_size=None,
           activation='softmax'):
    """Instantiates a NASNet architecture.
    Note that only TensorFlow is supported for now,
    therefore it only works with the data format
    `image_data_format='channels_last'` in your Keras config
    at `~/.keras/keras.json`.

    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(331, 331, 3)` for NASNetLarge or
            `(224, 224, 3)` for NASNetMobile).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        penultimate_filters: number of filters in the penultimate layer.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        nb_blocks: number of repeated blocks of the NASNet model.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        stem_filters: number of filters in the initial stem block
        initial_reduction: whether to perform the reduction step at the
            beginning of the network. Set to `True` for ImageNet and mobile
            models, `False` for CIFAR models.
        skip_reduction_layer_input: Determines whether to skip the reduction layers
            when calculating the previous layer to connect to.
        use_auxiliary_branch: Whether to use the auxiliary branch during
            training or evaluation.
        filters_multiplier: controls the width of the network.
            - If `filters_multiplier` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `filters_multiplier` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `filters_multiplier` = 1, default number of filters from the paper
                 are used at each layer.
        dropout: dropout rate
        weight_decay: l2 regularization weight
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights)
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        default_size: specifies the default image size of the model
        activation: Type of activation at the top layer.
            Can be one of 'softmax' or 'sigmoid'.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only the TensorFlow backend is currently supported, '
                           'as other backends do not support '
                           'separable convolutions.')

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as True, `classes` should be 1000.')

    if default_size is None:
        default_size = 331

    # Determine proper input shape and default size.
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_size,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top or weights)

    if K.image_data_format() != 'channels_last':
        warnings.warn('The NASNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if penultimate_filters % 24 != 0:
        raise ValueError('`penultimate_filters` must be divisible by 24, '
                         'got %d.' % penultimate_filters)

    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    filters = penultimate_filters // 24
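    # With the default `NASNet (6 @ 4032)` configuration this gives
    # 4032 // 24 = 168 base filters; the cell widths below scale this base
    # by `filters_multiplier` at each reduction stage.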

    if initial_reduction:
        x = Conv2D(stem_filters, (3, 3), strides=(2, 2), padding='valid',
                   use_bias=False, name='stem_conv1', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(img_input)
    else:
        x = Conv2D(stem_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False,
                   name='stem_conv1', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(img_input)

    x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                           name='stem_bn1')(x)

    p = None
    if initial_reduction:  # imagenet / mobile mode
        x, p = _reduction_A(x, p, filters // (filters_multiplier ** 2), weight_decay,
                            id='stem_1')
        x, p = _reduction_A(x, p, filters // filters_multiplier, weight_decay,
                            id='stem_2')

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters, weight_decay, id='%d' % i)

    x, p0 = _reduction_A(x, p, filters * filters_multiplier, weight_decay,
                         id='reduce_%d' % nb_blocks)

    p = p0 if not skip_reduction_layer_input else p

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters * filters_multiplier, weight_decay,
                         id='%d' % (nb_blocks + i + 1))

    auxiliary_x = None
    if initial_reduction:  # imagenet / mobile mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay, pooling,
                                              include_top, activation)

    x, p0 = _reduction_A(x, p, filters * filters_multiplier ** 2, weight_decay,
                         id='reduce_%d' % (2 * nb_blocks))

    if not initial_reduction:  # CIFAR mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay, pooling,
                                              include_top, activation)

    p = p0 if not skip_reduction_layer_input else p

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters * filters_multiplier ** 2, weight_decay,
                         id='%d' % (2 * nb_blocks + i + 1))

    x = Activation('relu')(x)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dropout(dropout)(x)
        x = Dense(classes, activation=activation,
                  kernel_regularizer=l2(weight_decay), name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    if use_auxiliary_branch:
        model = Model(inputs, [x, auxiliary_x], name='NASNet_with_auxiliary')
    else:
        model = Model(inputs, x, name='NASNet')

    # load weights
    if weights == 'imagenet':
        if default_size == 224:  # mobile version
            if include_top:
                if use_auxiliary_branch:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXULARY
                    model_name = 'nasnet_mobile_with_aux.h5'
                else:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH
                    model_name = 'nasnet_mobile.h5'
            else:
                if use_auxiliary_branch:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXULARY_NO_TOP
                    model_name = 'nasnet_mobile_with_aux_no_top.h5'
                else:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_NO_TOP
                    model_name = 'nasnet_mobile_no_top.h5'

            weights_file = get_file(model_name, weight_path, cache_subdir='models')
            model.load_weights(weights_file, by_name=True)

        elif default_size == 331:  # large version
            if include_top:
                if use_auxiliary_branch:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_AUXULARY
                    model_name = 'nasnet_large_with_aux.h5'
                else:
                    weight_path = NASNET_LARGE_WEIGHT_PATH
                    model_name = 'nasnet_large.h5'
            else:
                if use_auxiliary_branch:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_AUXULARY_NO_TOP
                    model_name = 'nasnet_large_with_aux_no_top.h5'
                else:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_NO_TOP
                    model_name = 'nasnet_large_no_top.h5'

            weights_file = get_file(model_name, weight_path, cache_subdir='models')
            model.load_weights(weights_file, by_name=True)

        else:
            raise ValueError('ImageNet weights can only be loaded on NASNetLarge '
                             'or NASNetMobile')

    if old_data_format:
        K.set_image_data_format(old_data_format)

    return model
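
A minimal usage sketch (not part of the original source): a randomly
initialized, NASNetMobile-sized model, i.e. `NASNet (4 @ 1056)`. The import
path `nasnet` is hypothetical.

import numpy as np
from nasnet import NASNet  # hypothetical module path

# penultimate_filters=1056 is divisible by 24, as the check above requires.
model = NASNet(input_shape=(224, 224, 3),
               penultimate_filters=1056,
               nb_blocks=4,
               stem_filters=32,
               weights=None,
               default_size=224)

batch = np.random.rand(1, 224, 224, 3).astype('float32')
print(model.predict(batch).shape)  # (1, 1000)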
Example #3
def VGG16(include_top=True, weights='vggface',
          input_tensor=None, input_shape=None,
          pooling=None,
          classes=2622):
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, name='fc6')(x)
        x = Activation('relu', name='fc6/relu')(x)
        x = Dense(4096, name='fc7')(x)
        x = Activation('relu', name='fc7/relu')(x)
        x = Dense(classes, name='fc8')(x)
        x = Activation('softmax', name='fc8/softmax')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vggface_vgg16')

    # load weights
    if weights == 'vggface':
        if include_top:
            weights_path = get_file('rcmalli_vggface_tf_vgg16.h5',
                                    utils.VGG16_WEIGHTS_PATH,
                                    cache_subdir=utils.VGGFACE_DIR)
        else:
            weights_path = get_file('rcmalli_vggface_tf_notop_vgg16.h5',
                                    utils.VGG16_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir=utils.VGGFACE_DIR)
        model.load_weights(weights_path, by_name=True)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
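            # Flatten() ordered the conv features according to the data
            # format, so the first Dense kernel must be re-ordered to match.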
            if include_top:
                maxpool = model.get_layer(name='pool5')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc6')
                layer_utils.convert_dense_weights_data_format(dense, shape,
                                                              'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
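
A minimal usage sketch (not part of the original source): the headless model
as a face-descriptor extractor. In the keras_vggface package this model is
normally reached via `VGGFace(model='vgg16')`; the import path below is
hypothetical.

import numpy as np
from vggface_models import VGG16  # hypothetical module path

# Headless model with global average pooling: one 512-d descriptor per face.
# The default weights='vggface' are downloaded on first use.
model = VGG16(include_top=False, pooling='avg', input_shape=(224, 224, 3))
face = np.random.rand(1, 224, 224, 3).astype('float32')
print(model.predict(face).shape)  # (1, 512)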
Example #4
def SENET50(include_top=True, weights='vggface',
            input_tensor=None, input_shape=None,
            pooling=None,
            classes=8631):
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    bn_eps = 0.0001

    x = Conv2D(
        64, (7, 7), use_bias=False, strides=(2, 2), padding='same',
        name='conv1/7x7_s2')(img_input)
    x = BatchNormalization(axis=bn_axis, name='conv1/7x7_s2/bn', epsilon=bn_eps)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = senet_conv_block(x, 3, [64, 64, 256], stage=2, block=1, strides=(1, 1))
    x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=2)
    x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=3)

    x = senet_conv_block(x, 3, [128, 128, 512], stage=3, block=1)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=2)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=3)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=4)

    x = senet_conv_block(x, 3, [256, 256, 1024], stage=4, block=1)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=2)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=3)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=4)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=5)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=6)

    x = senet_conv_block(x, 3, [512, 512, 2048], stage=5, block=1)
    x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=2)
    x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=3)

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='classifier')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vggface_senet50')

    # load weights
    if weights == 'vggface':
        if include_top:
            weights_path = get_file('rcmalli_vggface_tf_senet50.h5',
                                    utils.SENET50_WEIGHTS_PATH,
                                    cache_subdir=utils.VGGFACE_DIR)
        else:
            weights_path = get_file('rcmalli_vggface_tf_notop_senet50.h5',
                                    utils.SENET50_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir=utils.VGGFACE_DIR)
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                avg_pool = model.get_layer(name='avg_pool')
                shape = avg_pool.output_shape[1:]
                dense = model.get_layer(name='classifier')
                layer_utils.convert_dense_weights_data_format(dense, shape,
                                                              'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)

    return model
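
A minimal usage sketch (not part of the original source), mirroring the VGG16
example above. In the keras_vggface package this model is normally reached via
`VGGFace(model='senet50')`; the import path below is hypothetical.

import numpy as np
from vggface_models import SENET50  # hypothetical module path

# Headless SE-ResNet-50 feature extractor; pretrained weights download on
# first use. Note the 197-pixel minimum input size enforced above.
model = SENET50(include_top=False, pooling='avg', input_shape=(224, 224, 3))
face = np.random.rand(1, 224, 224, 3).astype('float32')
print(model.predict(face).shape)  # (1, 2048)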
Example #5
def BisenetV2(include_top=True,
              input_tensor=None,
              input_shape=(224, 224, 3),
              weights=None
              ):
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only the TensorFlow backend is supported for now.')
    name = "bisenetv2"
    input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=28, require_flatten=include_top,
                                      data_format=K.image_data_format())
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    img_input1 = Conv2D(16, kernel_size=(3, 3), strides=2, padding="same", use_bias=False, name="stem_block/conv_block_1")(img_input)
    img_input1 = BatchNormalization(axis=-1, name="stem_block/conv_block_1/bn_1")(img_input1)
    img_input1 = Activation(activation="relu", name="stem_block/conv_block_1/activate_1")(img_input1)

    branch_left_output = Conv2D(8, kernel_size=(1, 1), strides=1, padding="same", use_bias=False, name="stem_block/downsample_branch_left/1x1_conv_block")(img_input1)
    branch_left_output = BatchNormalization(axis=-1, name="stem_block/downsample_branch_left/1x1_conv_block/bn_1")(branch_left_output)
    branch_left_output = Activation(activation="relu", name="stem_block/downsample_branch_left/1x1_conv_block/activate_1")(branch_left_output)


    branch_left_output = Conv2D(16, kernel_size=(3, 3), strides=2, padding="same", use_bias=False,
                                name="stem_block/downsample_branch_left/3x3_conv_block")(branch_left_output)
    branch_left_output = BatchNormalization(axis=-1, name="stem_block/downsample_branch_left/3x3_conv_block/bn_1")(branch_left_output)
    branch_left_output = Activation(activation="relu", name="stem_block/downsample_branch_left/3x3_conv_block/activate_1")(branch_left_output)


    branch_right_output = MaxPool2D(pool_size=(3, 3), strides=2, padding='same', name="stem_block/downsample_branch_right/maxpooling_block")(img_input1)
    stem_result = Concatenate(axis=-1, name="stem_block/concate_features")([branch_left_output, branch_right_output])
    stem_result = Conv2D(16, kernel_size=(3, 3), strides=1, padding="same", use_bias=False, name="stem_block/final_conv_block")(stem_result)
    stem_result = BatchNormalization(axis=-1, name="stem_block/final_conv_block/bn_1")(stem_result)
    stem_result = Activation(activation="relu", name="stem_block/final_conv_block/activate_1")(stem_result)

    # Context embedding block: global average pooling over the spatial axes,
    # implemented with the custom KerasReduceMean layer.
    embedding_result = KerasReduceMean(axis=(1, 2), keep_dim=True, name="global_avg_pooling")(stem_result)

    embedding_result = BatchNormalization(axis=-1, name="context_embedding_block/bn")(embedding_result)
    output_channels = stem_result.get_shape().as_list()[-1]
    embedding_result = Conv2D(output_channels, kernel_size=(1, 1), strides=1, padding="same", use_bias=False,
                              name="context_embedding_block/conv_block_1")(embedding_result)
    embedding_result = BatchNormalization(axis=-1, name="context_embedding_block/conv_block_1/bn_1")(embedding_result)
    embedding_result = Activation(activation="relu", name="context_embedding_block/conv_block_1/activate_1")(embedding_result)
    embedding_result = Add(name="context_embedding_block/fused_features")([embedding_result, stem_result])
    embedding_result = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False, name="context_embedding_block/final_conv_block")(embedding_result)


    output_channels = embedding_result.get_shape().as_list()[-1]
    gather_expansion_result = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False,
                                     name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block")(embedding_result)
    gather_expansion_result = BatchNormalization(axis=-1, name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block/bn_1")(gather_expansion_result)
    gather_expansion_result = Activation(activation="relu", name="ge_block_with_stride_1/stride_equal_one_module/3x3_conv_block/activate_1")(gather_expansion_result)

    gather_expansion_result = DepthwiseConv2D(kernel_size=3, strides=1, depth_multiplier=6, padding='same',
                                              name="ge_block_with_stride_1/stride_equal_one_module/depthwise_conv_block")(gather_expansion_result)
    gather_expansion_result = BatchNormalization(axis=-1, name="ge_block_with_stride_1/stride_equal_one_module/dw_bn")(gather_expansion_result)

    gather_expansion_result = Conv2D(output_channels, kernel_size=(1, 1), strides=1, padding="same", use_bias=False,
                                     name="ge_block_with_stride_1/stride_equal_one_module/1x1_conv_block")(gather_expansion_result)
    gather_expansion_result = Add(name="ge_block_with_stride_1/stride_equal_one_module/fused_features")([embedding_result, gather_expansion_result])
    gather_expansion_result = Activation(activation="relu", name="ge_block_with_stride_1/stride_equal_one_module/ge_output")(gather_expansion_result)

    gather_expansion_proj_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=2, padding="same",
                                                   name="ge_block_with_stride_2/stride_equal_two_module/input_project_dw_conv_block")(gather_expansion_result)
    gather_expansion_proj_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/input_project_bn")(gather_expansion_proj_result)
    gather_expansion_proj_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same", use_bias=False, activation=None)(gather_expansion_proj_result)
    input_tensor_channels = gather_expansion_result.get_shape().as_list()[-1]
    gather_expansion_stride2_result = Conv2D(input_tensor_channels, kernel_size=(3, 3), strides=1, padding="same",
                                             use_bias=False, name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block")(gather_expansion_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block/bn_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = Activation(activation="relu", name="ge_block_with_stride_2/stride_equal_two_module/3x3_conv_block/activate_1")(gather_expansion_stride2_result)

    gather_expansion_stride2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=6, strides=2, padding="same",
                                                      name="ge_block_with_stride_2/stride_equal_two_module/depthwise_conv_block_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/dw_bn_1")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=1, padding="same",
                                                      name="ge_block_with_stride_2/stride_equal_two_module/depthwise_conv_block_2")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2/stride_equal_two_module/dw_bn_2")(gather_expansion_stride2_result)
    gather_expansion_stride2_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same",
                                             use_bias=False, activation=None, name="ge_block_with_stride_2/stride_equal_two_module/1x1_conv_block")(gather_expansion_stride2_result)
    gather_expansion_total_result = Add(name="ge_block_with_stride_2/stride_equal_two_module/fused_features")([gather_expansion_proj_result, gather_expansion_stride2_result])
    gather_expansion_total_result = Activation(activation="relu", name="ge_block_with_stride_2/stride_equal_two_module/ge_output")(gather_expansion_total_result)


    gather_expansion_proj2_result = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=2, padding="same",
                                                   name="ge_block_with_stride_2_repeat/stride_equal_two_module/input_project_dw_conv_block")(gather_expansion_total_result)
    gather_expansion_proj2_result = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/input_project_bn")(gather_expansion_proj2_result)
    gather_expansion_proj2_result = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same", use_bias=False, activation=None)(gather_expansion_proj2_result)
    input_tensor_channels = gather_expansion_total_result.get_shape().as_list()[-1]
    gather_expansion_stride2_result_repeat = Conv2D(input_tensor_channels, kernel_size=(3, 3), strides=1,  padding="same",
                                             use_bias=False, name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block")(gather_expansion_total_result)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block/bn_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = Activation(activation="relu", name="ge_block_with_stride_2_repeat/stride_equal_two_module/3x3_conv_block/activate_1")(gather_expansion_stride2_result_repeat)

    gather_expansion_stride2_result_repeat = DepthwiseConv2D(kernel_size=3, depth_multiplier=6, strides=2, padding="same",
                                                      name="ge_block_with_stride_2_repeat/stride_equal_two_module/depthwise_conv_block_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/dw_bn_1")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = DepthwiseConv2D(kernel_size=3, depth_multiplier=1, strides=1, padding="same",
                                                      name="ge_block_with_stride_2_repeat/stride_equal_two_module/depthwise_conv_block_2")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = BatchNormalization(axis=-1, name="ge_block_with_stride_2_repeat/stride_equal_two_module/dw_bn_2")(gather_expansion_stride2_result_repeat)
    gather_expansion_stride2_result_repeat = Conv2D(128, kernel_size=(1, 1), strides=1, padding="same",
                                             use_bias=False, activation=None, name="ge_block_with_stride_2_repeat/stride_equal_two_module/1x1_conv_block")(gather_expansion_stride2_result_repeat)
    gather_expansion_total_result_repeat = Add(name="ge_block_with_stride_2_repeat/stride_equal_two_module/fused_features")([gather_expansion_proj2_result, gather_expansion_stride2_result_repeat])
    gather_expansion_total_result_repeat = Activation(activation="relu", name="ge_block_with_stride_2_repeat/stride_equal_two_module/ge_output")(gather_expansion_total_result_repeat)

    detail_input_tensor = stem_result
    semantic_input_tensor = gather_expansion_total_result_repeat
    output_channels = stem_result.get_shape().as_list()[-1]
    detail_branch_remain = DepthwiseConv2D(kernel_size=3, strides=1, padding="same", depth_multiplier=1,
                                           name="guided_aggregation_block/detail_branch/3x3_dw_conv_block")(detail_input_tensor)
    detail_branch_remain = BatchNormalization(axis=-1, name="guided_aggregation_block/detail_branch/bn_1")(detail_branch_remain)
    detail_branch_remain = Conv2D(output_channels, kernel_size=(1, 1), padding="same", strides=1, use_bias=False,
                                  name="guided_aggregation_block/detail_branch/1x1_conv_block")(detail_branch_remain)

    detail_branch_downsample = Conv2D(output_channels, kernel_size=(3, 3), strides=2, use_bias=False, activation=None,
                                      padding="same", name="guided_aggregation_block/detail_branch/3x3_conv_block")(detail_input_tensor)

    detail_branch_downsample = AveragePooling2D(pool_size=(3, 3), strides=2, padding="same", name="guided_aggregation_block/detail_branch/avg_pooling_block")(detail_branch_downsample)

    semantic_branch_remain = DepthwiseConv2D(kernel_size=3, strides=1, padding="same", depth_multiplier=1,
                                             name="guided_aggregation_block/semantic_branch/3x3_dw_conv_block")(semantic_input_tensor)
    semantic_branch_remain = BatchNormalization(axis=-1, name="guided_aggregation_block/semantic_branch/bn_1")(semantic_branch_remain)
    semantic_branch_remain = Conv2D(output_channels, kernel_size=(1, 1), strides=1, use_bias=False, activation=None, padding="same",
                                    name="guided_aggregation_block/semantic_branch/1x1_conv_block")(semantic_branch_remain)
    semantic_branch_remain = Activation("sigmoid", name="guided_aggregation_block/semantic_branch/semantic_remain_sigmoid")(semantic_branch_remain)

    semantic_branch_upsample = Conv2D(output_channels, kernel_size=(3, 3), strides=1, padding="same", use_bias=False,
                                      activation=None, name="guided_aggregation_block/semantic_branch/3x3_conv_block")(semantic_input_tensor)
    semantic_branch_upsample = BilinearUpSampling2D((4, 4), name="guided_aggregation_block/semantic_branch/semantic_upsample_features")(semantic_branch_upsample)
    semantic_branch_upsample = Activation("sigmoid", name="guided_aggregation_block/semantic_branch/semantic_branch_upsample_sigmoid")(semantic_branch_upsample)

    guided_features_remain = Multiply(name="guided_aggregation_block/aggregation_features/guided_detail_features")([detail_branch_remain, semantic_branch_upsample])
    guided_features_downsample = Multiply(name="guided_aggregation_block/aggregation_features/guided_semantic_features")([detail_branch_downsample, semantic_branch_remain])

    guided_features_upsample = BilinearUpSampling2D((4, 4), name="guided_aggregation_block/aggregation_features/guided_upsample_features")(guided_features_downsample)

    guided_features = Add(name="guided_aggregation_block/aggregation_features/fused_features")([guided_features_remain, guided_features_upsample])
    guided_features = Conv2D(output_channels, kernel_size=(3, 3), strides=1, use_bias=False, padding="same",
                             name="guided_aggregation_block/aggregation_features/aggregation_feature_output")(guided_features)
    guided_features = BatchNormalization(axis=-1, name="guided_aggregation_block/aggregation_features/aggregation_feature_output/bn_1")(guided_features)
    guided_features = Activation(activation="relu", name="guided_aggregation_block/aggregation_features/aggregation_feature_output/activate_1")(guided_features)

    result = Conv2D(8, kernel_size=(3, 3), strides=1, use_bias=False, padding="same", name="seg_head_block/3x3_conv_block")(guided_features)
    result = BatchNormalization(axis=-1, name="seg_head_block/bn_1")(result)
    result = Activation("relu", name="seg_head_block/activate_1")(result)

    result = BilinearUpSampling2D((4, 4), name="seg_head_block/segmentation_head_upsample")(result)

    result = Conv2D(1, kernel_size=(1, 1), strides=1, use_bias=False, padding="same",
                    name="seg_head_block/1x1_conv_block")(result)
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, result, name=name)

    if weights:
        model.load_weights(weights, by_name=True)

    return model
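
A minimal smoke test (not part of the original source), assuming the module
defining BisenetV2 and its custom KerasReduceMean / BilinearUpSampling2D
layers is importable as `bisenetv2`. With the default 224x224x3 input, the
segmentation head upsamples back to a single-channel full-resolution map.

import numpy as np
from bisenetv2 import BisenetV2  # hypothetical module path

model = BisenetV2(input_shape=(224, 224, 3), weights=None)
image = np.random.rand(1, 224, 224, 3).astype('float32')
print(model.predict(image).shape)  # (1, 224, 224, 1)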