Example #1
def depthwiseConvBlock(x,
                       features_in,
                       features_out,
                       down_sample=False,
                       kernel_size=(3, 3),
                       final_activation='relu',
                       dilation_rate=(1, 1)):
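    # Depthwise 3x3 conv followed by a 1x1 pointwise projection, each with
    # BatchNorm; the final activation is configurable (ReLU or LeakyReLU).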
    strides = (2, 2) if down_sample else (1, 1)
    x = DepthwiseConvolution2D(int(features_in),
                               kernel_size,
                               strides=strides,
                               padding='same',
                               use_bias=False,
                               dilation_rate=dilation_rate)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(features_out), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)

    if final_activation == 'leaky_relu':
        x = LeakyReLU(alpha=0.3)(x)
    else:
        x = Activation(final_activation)(x)
    return x
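
For reference, a minimal sketch of the same separable block written against the layers that ship with tf.keras (this assumes TensorFlow 2.x; unlike the third-party DepthwiseConvolution2D used above, the built-in DepthwiseConv2D takes no filter count, only an optional depth_multiplier):

import tensorflow as tf
from tensorflow.keras import layers

def separable_block(x, features_out, down_sample=False, kernel_size=(3, 3)):
    # Depthwise 3x3 conv, then a 1x1 pointwise projection, each with BN + ReLU.
    strides = (2, 2) if down_sample else (1, 1)
    x = layers.DepthwiseConv2D(kernel_size, strides=strides,
                               padding='same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(features_out, (1, 1), padding='same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    return layers.Activation('relu')(x)

# Two stacked blocks on a 224x224 RGB input.
inputs = tf.keras.Input(shape=(224, 224, 3))
y = separable_block(inputs, 64)
y = separable_block(y, 128, down_sample=True)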
Example #2
def _depthwise_convolution2D(inputs,
                             alpha,
                             depthwise_filter_size,
                             kernel_size,
                             strides,
                             padding='same',
                             bias=False):
    # Depthwise conv -> BatchNorm -> LeakyReLU, with the channel count
    # scaled by the width multiplier `alpha`.
    x = DepthwiseConvolution2D(int(depthwise_filter_size * alpha),
                               kernel_size,
                               strides=strides,
                               padding=padding,
                               use_bias=bias)(inputs)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    return x
Example #3
def MobileNet(input_tensor=None, input_shape=None, alpha=1, shallow=False, classes=1000):
    """Instantiates the MobileNet.Network has two hyper-parameters
        which are the width of network (controlled by alpha)
        and input size.
        
        # Arguments
            input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
                to use as image input for the model.
            input_shape: optional shape tuple. The default input shape
                is `(224, 224, 3)` (with `channels_last` data format)
                or `(3, 224, 224)` (with `channels_first` data format).
                It should have exactly 3 input channels,
                and width and height should be no smaller than 96.
                E.g. `(200, 200, 3)` would be one valid value.
            alpha: optional width multiplier that scales the number of
                filters in each layer.
            shallow: optional flag; if True, the five repeated 512-filter
                separable blocks are omitted to make the network smaller.
            classes: optional number of classes to classify images
                into.
        # Returns
            A Keras model instance.

        """

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=96,
                                      data_format=K.image_data_format(),
                                      include_top=True)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Convolution2D(int(32 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(32 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(64 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(64 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(512 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    if not shallow:
        for _ in range(5):
            x = DepthwiseConvolution2D(int(512 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Convolution2D(int(512 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(1024 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(1024 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(1024 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = GlobalAveragePooling2D()(x)
    out = Dense(classes, activation='softmax')(x)

    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, out, name='mobilenet')

    return model
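
A minimal usage sketch, assuming the Keras imports the function relies on (Input, Convolution2D, DepthwiseConvolution2D, BatchNormalization, _obtain_input_shape, get_source_inputs, Model, and K) are in scope:

model = MobileNet(input_shape=(224, 224, 3), alpha=0.5, classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()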
Example #4
def mobilenet(input_tensor):

    if input_tensor is None:
        input_tensor = Input(shape=(300, 300, 3))

    x = ZeroPadding2D(padding=((1, 1), (1, 1)),
                      name='conv1_padding')(input_tensor)
    x = Convolution2D(32, (3, 3),
                      strides=(2, 2),
                      padding='valid',
                      use_bias=False,
                      name="conv0")(x)

    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv0/bn")(x)

    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(32, (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False,
                               name="conv1/dw")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001,
                           name="conv1/dw/bn")(x)
    x = Activation('relu')(x)
    x = Convolution2D(64, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name="conv1")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv1/bn")(x)
    x = Activation('relu')(x)

    print("conv1 shape: ", x.shape)

    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv2_padding')(x)
    x = DepthwiseConvolution2D(64, (3, 3),
                               strides=(2, 2),
                               padding='valid',
                               use_bias=False,
                               name="conv2/dw")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001,
                           name="conv2/dw/bn")(x)
    x = Activation('relu')(x)
    x = Convolution2D(128, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name="conv2")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv2/bn")(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(128, (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False,
                               name="conv3/dw")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001,
                           name="conv3/dw/bn")(x)
    x = Activation('relu')(x)
    x = Convolution2D(128, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name="conv3")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv3/bn")(x)
    x = Activation('relu')(x)

    print("conv3 shape: ", x.shape)

    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv3_padding')(x)
    x = DepthwiseConvolution2D(128, (3, 3),
                               strides=(2, 2),
                               padding='valid',
                               use_bias=False,
                               name="conv4/dw")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001,
                           name="conv4/dw/bn")(x)
    x = Activation('relu')(x)
    x = Convolution2D(256, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name="conv4")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv4/bn")(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(256, (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False,
                               name="conv5/dw")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001,
                           name="conv5/dw/bn")(x)
    x = Activation('relu')(x)
    x = Convolution2D(256, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name="conv5")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv5/bn")(x)
    x = Activation('relu')(x)

    print("conv5 shape: ", x.shape)

    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv4_padding')(x)
    x = DepthwiseConvolution2D(256, (3, 3),
                               strides=(2, 2),
                               padding='valid',
                               use_bias=False,
                               name="conv6/dw")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001,
                           name="conv6/dw/bn")(x)
    x = Activation('relu')(x)
    x = Convolution2D(512, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name="conv6")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv6/bn")(x)
    x = Activation('relu')(x)

    test = x
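    # `test` holds the conv6 feature map so it can be returned below
    # alongside conv11 and conv13.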

    for i in range(5):
        x = DepthwiseConvolution2D(512, (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   use_bias=False,
                                   name=("conv" + str(7 + i) + "/dw"))(x)
        x = BatchNormalization(momentum=0.99,
                               epsilon=0.00001,
                               name=("conv" + str(7 + i) + "/dw/bn"))(x)
        x = Activation('relu')(x)
        x = Convolution2D(512, (1, 1),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False,
                          name=("conv" + str(7 + i)))(x)
        x = BatchNormalization(momentum=0.99,
                               epsilon=0.00001,
                               name=("conv" + str(7 + i) + "/bn"))(x)
        x = Activation('relu')(x)

    # print ("conv11 shape: ", x.shape)
    conv11 = x

    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv5_padding')(x)
    x = DepthwiseConvolution2D(512, (3, 3),
                               strides=(2, 2),
                               padding='valid',
                               use_bias=False,
                               name="conv12/dw")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001,
                           name="conv12/dw/bn")(x)
    x = Activation('relu')(x)
    x = Convolution2D(1024, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name="conv12")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv12/bn")(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(1024, (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False,
                               name="conv13/dw")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001,
                           name="conv13/dw/bn")(x)
    x = Activation('relu')(x)
    x = Convolution2D(1024, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name="conv13")(x)
    x = BatchNormalization(momentum=0.99, epsilon=0.00001, name="conv13/bn")(x)
    x = Activation('relu')(x)

    conv13 = x

    # print ("conv13 shape: ", x.shape)

    # model = Model(inputs=input_tensor, outputs=x)

    return [conv11, conv13, test]
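
This variant is a detection backbone rather than a classifier: it returns the conv11 and conv13 feature maps (plus the conv6 map kept in `test`) for an SSD-style head to consume. A quick sketch of wrapping the outputs in a Model to inspect their shapes (variable names here are illustrative, and the same Keras imports are assumed):

inp = Input(shape=(300, 300, 3))
conv11_feat, conv13_feat, conv6_feat = mobilenet(inp)
backbone = Model(inputs=inp, outputs=[conv11_feat, conv13_feat, conv6_feat])
backbone.summary()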
Example #5
def VGG_with_MobileNet(input_tensor=None,
                       input_shape=None,
                       alpha=1,
                       shallow=False,
                       classes=10):

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=96,
                                      data_format=K.image_data_format(),
                                      include_top=True)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    """ Input and 3x3 conv 64 filters"""
    x = Convolution2D(int(64 * alpha), (3, 3),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 64 filters and maxpooling by 2"""
    x = DepthwiseConvolution2D(int(64 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Convolution2D(int(64 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 128 filters"""
    x = DepthwiseConvolution2D(int(64 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 128 filters and maxpooling by 2"""
    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 256 filters"""
    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 256 filters"""
    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 256 filters and maxpooling by 2"""
    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Dropout(0.05)(x)
    """ 3x3 conv 512 filters"""
    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 512 filters"""
    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 512 filters and maxpooling by 2"""
    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Dropout(0.1)(x)
    """ 3x3 conv 512 filters"""
    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 512 filters"""
    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    """ 3x3 conv 512 filters and maxpooling by 2"""
    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Dropout(0.2)(x)
    """ 7x7 conv 512 filters"""
    x = DepthwiseConvolution2D(int(512 * alpha), (7, 7),
                               strides=(1, 1),
                               padding='same',
                               W_regularizer=l2(0.001),
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Dropout(0.3)(x)
    """ 7x7 conv 512 filters"""
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      W_regularizer=l2(0.001),
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Dropout(0.4)(x)

    x = GlobalAveragePooling2D()(x)
    out = Dense(classes, activation='softmax')(x)

    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, out, name='VGG_with_MobileNet')
    model.load_weights('weights.h5')
    adam = Adam(lr=0.0001,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=1e-08,
                decay=0.000001)

    model.compile(adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
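
The bare string literals above annotate which VGG layer each separable block stands in for (strided depthwise convolutions replace the max-pooling steps), and `model.load_weights('weights.h5')` assumes a pre-trained weights file is present in the working directory. Note also that `W_regularizer` is the Keras 1 keyword; under Keras 2 the equivalent argument is `kernel_regularizer`, e.g.:

x = Convolution2D(int(64 * alpha), (3, 3),
                  strides=(1, 1),
                  padding='same',
                  kernel_regularizer=l2(0.001),
                  use_bias=False)(img_input)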
Example #6
def MobileNetDih4(alpha=1.0, shape=[75, 75, 2]):

    img_input = Input(shape)

    xd = []
    for d in Dih4(img_input, name='1'):
        x1 = Convolution2D(int(32 * alpha), (3, 3), strides=(2, 2), padding='valid', use_bias=True)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = DepthwiseConvolution2D(int(32 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(64 * alpha/8), (1, 1), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='2'):
        x1 = DepthwiseConvolution2D(int(64 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=True)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(128 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = DepthwiseConvolution2D(int(128 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(128 * alpha/8), (1, 1), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='3'):
        x1 = DepthwiseConvolution2D(int(128 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=True)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(256 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = DepthwiseConvolution2D(int(256 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(256 * alpha/8), (1, 1), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='4'):
        x1 = DepthwiseConvolution2D(int(256 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=True)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(512 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = DepthwiseConvolution2D(int(512 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(512 * alpha/8), (1, 1), strides=(1, 1), padding='same', use_bias=True)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.)(x)

    x1 = Dense(128)(x)
    x2 = Dense(128)(x)
    x = Maximum()([x1, x2])
    x = BatchNormalization()(x)
    out = Dense(1, activation='sigmoid')(x)

    model = Model(img_input, out, name='mobilenet')
    return model
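
A usage sketch; `Dih4` is not defined in this snippet and is assumed to yield the eight dihedral-group transforms of its input, which is why each branch keeps only `alpha/8` of the usual channel count before the concatenation:

model = MobileNetDih4(alpha=1.0, shape=[75, 75, 2])
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])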
Example #7
def MobileNet(alpha=1.0, shape=[16, 224, 224, 3], nframe=16):

    img_input = Input(shape)

    x = TimeDistributed(
        Convolution2D(int(32 * alpha), (3, 3),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False))(img_input)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)

    x = TimeDistributed(
        DepthwiseConvolution2D(int(32 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)
    x = TimeDistributed(
        Convolution2D(int(64 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)

    x = TimeDistributed(
        DepthwiseConvolution2D(int(64 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)
    x = TimeDistributed(
        Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)

    x = TimeDistributed(
        DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)
    x = TimeDistributed(
        Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)

    x = TimeDistributed(
        DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)
    x = TimeDistributed(
        Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)

    x = TimeDistributed(
        DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)
    x = TimeDistributed(
        Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Activation('relu'))(x)

    x = ConvLSTM2D(int(128 * alpha),
                   kernel_size=(7, 7),
                   activation='relu',
                   dropout=0.35,
                   recurrent_dropout=0.,
                   return_sequences=False)(x)
    x = GlobalAveragePooling2D()(x)
    out = Dense(6, activation='softmax')(x)

    model = Model(img_input, out, name='mobilenet')
    return model
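
Here every 2D block is wrapped in TimeDistributed so it runs frame-by-frame over a clip, and a ConvLSTM2D layer aggregates across time before the 6-way softmax. A minimal usage sketch:

model = MobileNet(alpha=0.5, shape=[16, 224, 224, 3], nframe=16)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])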
Example #8
def MobileNet(alpha=1.0, shape=[75, 75, 2]):

    img_input = Input(shape)

    x = Convolution2D(int(32 * alpha), (3, 3),
                      strides=(2, 2),
                      padding='valid',
                      use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(32 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(64 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(64 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    for _ in range(5):
        x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Convolution2D(int(256 * alpha), (1, 1),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.35)(x)

    x1 = Dense(128)(x)
    x2 = Dense(128)(x)
    x = Maximum()([x1, x2])
    x = BatchNormalization()(x)
    out = Dense(1, activation='sigmoid')(x)

    model = Model(img_input, out, name='mobilenet')
    return model
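
A usage sketch for this binary-classification variant (75x75 two-channel inputs and a maxout-style head built from the Maximum of two Dense layers):

model = MobileNet(alpha=1.0, shape=[75, 75, 2])
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])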
Example #9
def MobileNet(input_tensor=None,
              input_shape=None,
              alpha=1,
              shallow=False,
              classes=1000):
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=96,
                                      data_format=K.image_data_format(),
                                      require_flatten=True)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Convolution2D(int(32 * alpha), (3, 3),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(32 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(64 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(64 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    if not shallow:
        for _ in range(5):
            x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                                       strides=(1, 1),
                                       padding='same',
                                       use_bias=False)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Convolution2D(int(512 * alpha), (1, 1),
                              strides=(1, 1),
                              padding='same',
                              use_bias=False)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(1024 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(1024 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(1024 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = GlobalAveragePooling2D()(x)
    out = Dense(classes, activation='softmax')(x)

    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, out, name='mobilenet')

    return model
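
A minimal usage sketch, here reusing an existing tensor as the image input (all Keras imports assumed to be in scope):

inp = Input(shape=(224, 224, 3))
model = MobileNet(input_tensor=inp, alpha=1, classes=1000)
model.summary()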
Example #10
def MobileNetDR(config, alpha=1.0, shape=[224, 224, 3]):

    img_input = Input(shape)

    #x = Dih4URandom()(img_input)
    x = Convolution2D(int(32 * alpha), (3, 3),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(32 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(64 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dih4URandom()(x)
    x = DepthwiseConvolution2D(int(64 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(512 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(512 * alpha), (3, 3),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(1024 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(1024 * alpha), (3, 3),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(1024 * alpha), (1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.)(x)

    out = []
    for l in config:
        out.append(Dense(config[l], activation='softmax', name=str(l))(x))

    model = Model(img_input, out, name='mobilenet')
    return model
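
A usage sketch; `config` is assumed to map output names to class counts, with one softmax head created per entry (the names below are hypothetical):

config = {'label_a': 5, 'label_b': 3}  # hypothetical output heads
model = MobileNetDR(config, alpha=1.0, shape=[224, 224, 3])
model.compile(optimizer='adam',
              loss={name: 'categorical_crossentropy' for name in config})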
Example #11
def build_model():
    # A shallow variant: the repeated 512-filter blocks and the 1024-filter
    # stage of the full MobileNet are omitted.
    classes = 20
    alpha = 1
    # `target_size` is assumed to be defined at module level.
    input_tensor = Input(shape=(target_size, target_size, 3))
    x = Convolution2D(int(32 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(32 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(64 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(64 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(128 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(128 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(256 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = DepthwiseConvolution2D(int(256 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Convolution2D(int(512 * alpha), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)



    x = GlobalAveragePooling2D()(x)
    out = Dense(classes, activation='softmax')(x)

    # input_tensor is always set above, so derive the model inputs from it.
    inputs = get_source_inputs(input_tensor)

    model = Model(inputs, out, name='mobilenet')
    return model
Example #12
def MobileNetDih4(config, alpha=1.0, shape=[224, 224, 3]):

    img_input = Input(shape)

    x = Convolution2D(int(32 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    xd = []
    for d in Dih4(x, name='1'):
        x1 = DepthwiseConvolution2D(int(32 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(64 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='2'):
        x1 = DepthwiseConvolution2D(int(64 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(128 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='3'):
        x1 = DepthwiseConvolution2D(int(128 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(128 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='4'):
        x1 = DepthwiseConvolution2D(int(128 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(256 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='5'):
        x1 = DepthwiseConvolution2D(int(256 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(256 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='6'):
        x1 = DepthwiseConvolution2D(int(256 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(512 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='7'):
        x1 = DepthwiseConvolution2D(int(512 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(512 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='8'):
        x1 = DepthwiseConvolution2D(int(512 * alpha), (3, 3), strides=(2, 2), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(1024 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)

    xd = []
    for d in Dih4(x, name='9'):
        x1 = DepthwiseConvolution2D(int(1024 * alpha), (3, 3), strides=(1, 1), padding='same', use_bias=False)(d)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        x1 = Convolution2D(int(1024 * alpha/4), (1, 1), strides=(1, 1), padding='same', use_bias=False)(x1)
        x1 = BatchNormalization()(x1)
        x1 = Activation('relu')(x1)
        xd.append(x1)
    x = concatenate(xd, axis=-1)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.35)(x)

    x = Dense(256)(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)
    out = []
    for l in config:
        out.append(Dense(config[l], activation='linear', name=str(l))(x))

    model = Model(img_input, out, name='mobilenet_dih')
    return model
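
A usage sketch; each entry of `config` becomes a linear Dense head here, so this variant suits multi-target regression (the target names below are hypothetical):

config = {'target_a': 1, 'target_b': 3}  # hypothetical regression targets
model = MobileNetDih4(config, alpha=1.0, shape=[224, 224, 3])
model.compile(optimizer='adam', loss='mse')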