Example #1
def conv2d_block(input, filters, strides=1, bn=True):
    # separable conv -> optional batch norm -> LeakyReLU
    d = SeparableConv2D(filters, kernel_size=3, strides=strides, padding='same')(input)
    if bn:
        d = BatchNormalization(momentum=0.8)(d)
    d = LeakyReLU(alpha=0.2)(d)
    return d
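
A minimal usage sketch for the helper above, assuming the standard Keras imports shown; the 128x128 RGB input shape is a placeholder:

from keras.layers import Input, SeparableConv2D, BatchNormalization, LeakyReLU
from keras.models import Model

inp = Input(shape=(128, 128, 3))        # placeholder input shape
d = conv2d_block(inp, 32)               # 128x128x32
d = conv2d_block(d, 64, strides=2)      # downsampled to 64x64x64
model = Model(inp, d)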
Example #2
def detect_model(classcnt):
    model_input = Input(shape=(INPUT_HEIGHT, INPUT_WIDTH, 3))

    x = model_input
    x05 = Conv2D(64, (5, 5),
                 strides=(3, 4),
                 activation=ACTFUNC,
                 padding='same',
                 name='block0_conv1')(x)
    x07 = Conv2D(64, (7, 7),
                 strides=(3, 4),
                 activation=ACTFUNC,
                 padding='same',
                 name='block0_conv2')(x)
    x09 = Conv2D(64, (9, 9),
                 strides=(3, 4),
                 activation=ACTFUNC,
                 padding='same',
                 name='block0_conv3')(x)
    x11 = Conv2D(64, (11, 11),
                 strides=(3, 4),
                 activation=ACTFUNC,
                 padding='same',
                 name='block0_conv4')(x)
    x = layers.concatenate([x05, x07, x09, x11])

    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False,
               name='block1_conv1')(x)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(728, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = Conv2D(1024, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block13_pool')(x)
    x = layers.add([x, residual])

    x = SeparableConv2D(1536, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)

    pred = Dense(3, activation='softmax', name='pred')(x)
    #x = Flatten()(x)
    #x = Dense(512, activation=ACTFUNC)(x)
    out = Dense(classcnt, activation='softmax', name='out')(x)

    model = Model(inputs=model_input, outputs=[pred, out])
    return model
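
A hypothetical setup for detect_model; INPUT_HEIGHT, INPUT_WIDTH and ACTFUNC are module-level names the function assumes, so placeholder values are shown, and each of the two softmax heads gets its own loss:

INPUT_HEIGHT, INPUT_WIDTH, ACTFUNC = 240, 320, 'relu'  # placeholders

model = detect_model(classcnt=5)
model.compile(optimizer='adam',
              loss={'pred': 'categorical_crossentropy',
                    'out': 'categorical_crossentropy'})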
Example #3
def mini_XCEPTION(input_shape, num_classes, l2_regularization=0.01):
    regularization = l2(l2_regularization)

    # base
    img_input = Input(input_shape)
    x = Conv2D(8, (3, 3),
               strides=(1, 1),
               kernel_regularizer=regularization,
               use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(8, (3, 3),
               strides=(1, 1),
               kernel_regularizer=regularization,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # module 1
    residual = Conv2D(16, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(16, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(16, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 2
    residual = Conv2D(32, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(32, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(32, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 3
    residual = Conv2D(64, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(64, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(64, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 4
    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    x = Conv2D(
        num_classes,
        (3, 3),
        # kernel_regularizer=regularization,
        padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model
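
An example instantiation; the 64x64 grayscale input and 7 classes are illustrative values, not fixed by the function itself:

model = mini_XCEPTION(input_shape=(64, 64, 1), num_classes=7)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])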
Example #4
def creatXception(data_info=None, Div=32, upsample=False, train=True, name='header_model'):

    # build the network with ImageNet weights
    inputShape = (data_info.IMG_ROW, data_info.IMG_COL, 3)

    if upsample is True:
        base_model = xception.Xception(input_shape=inputShape, Div=Div)
        if train:
            base_model.load_weights(data_info.base_model_weight_file)
        print('you chose upsample mode')

        res_out = base_model.get_layer('block14_sepconv1_bn').output
        residual = keras.layers.Conv2D(1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(res_out)
        residual = keras.layers.BatchNormalization()(residual)

        x = base_model.output
        x = keras.layers.SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block15_sepconv1')(x)
        x = keras.layers.BatchNormalization(name='block15_sepconv1_bn')(x)
        x = keras.layers.MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block15_pool')(x)
        x = keras.layers.add([x, residual], name='block15_add')
        x = keras.layers.Activation('relu', name='block15_sepconv1_act')(x)

        x = keras.layers.SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block15_sepconv2')(x)
        x = keras.layers.BatchNormalization(name='block15_sepconv2_bn')(x)
        x = keras.layers.Activation('relu', name='block15_sepconv2_act')(x)

        x = UpSampling2D((2, 2))(x)
        x = keras.layers.concatenate([x, base_model.output])
        x = SeparableConv2D(1024, (3, 3), use_bias=False, padding='same', name='up_conv0')(x)
        x = BatchNormalization(name='up_conv0_bn')(x)
        x = Activation('relu', name='up_conv0_act')(x)

        if data_info.pixel_level in (2, 3):
            x = UpSampling2D((2, 2))(x)
            x = keras.layers.concatenate([x, base_model.get_layer('block13_sepconv2_act').output])
            x = SeparableConv2D(1024, (3, 3), use_bias=False, padding='same', name='up_conv1')(x)
            x = BatchNormalization(name='up_conv1_bn')(x)
            x = Activation('relu', name='up_conv1_act')(x)
        if data_info.pixel_level == 2:
            x = UpSampling2D((2, 2))(x)
            x = keras.layers.concatenate([x, base_model.get_layer('block4_sepconv2_act').output])
            x = SeparableConv2D(1024, (3, 3), use_bias=False, padding='same', name='up_conv2')(x)
            x = BatchNormalization(name='up_conv2_bn')(x)
            x = Activation('relu', name='up_conv2_act')(x)

        x = Dropout(0.5)(x)
        x = Conv2D(256, (1, 1), use_bias=False, name='out_conv1')(x)
        x = Dropout(0.5)(x)
        x = Conv2D(256, (1, 1), use_bias=False, name='out_conv2')(x)
        x = Dropout(0.5)(x)

    else:

        base_model = xception.Xception(weights='imagenet', input_shape=inputShape, Div=Div)
        data_info.base_model = base_model
        x = base_model.output

        #x = Dropout(0.5)(x)
        x = Conv2D(256, (1, 1), use_bias=False, name='out_conv1')(x)
        #x = Dropout(0.5)(x)

    header_model = Model(inputs=base_model.input, outputs=x, name=name)

    if train:
        x = header_model.output
        x = Conv2D(data_info.class_num, (1, 1), use_bias=False, name='conv_out')(x)
        seg_output = Activation('softmax', name='seg_out')(x)
        x = GlobalAveragePooling2D()(x)
        main_output = Activation('softmax', name='main_out')(x)
        #with tf.device('/cpu:0'):
        model = Model(inputs=header_model.input, outputs=[main_output, seg_output], name='train_model')
        return model
    else:
        return header_model
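
The function reads several attributes off data_info and relies on a customized xception module that accepts a Div argument; a minimal sketch of the expected object, with attribute names taken from the function body and placeholder values:

from types import SimpleNamespace

data_info = SimpleNamespace(IMG_ROW=299, IMG_COL=299,
                            pixel_level=3, class_num=2,
                            base_model_weight_file='base_weights.h5')  # placeholder path
model = creatXception(data_info=data_info, upsample=True, train=True)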
Example #5
           dilation_rate=2,
           name='res5c_branch2c')(x)
y = _shortcut(y, x)

Resnet = y
network = ASPP(Resnet)
# print(network.shape)

network = UpSampling2D(size=(2, 2), name='US1')(network)
network = Conv2D(64, (3, 3),
                 activation='relu',
                 padding='same',
                 strides=(1, 1),
                 name='up1')(network)
# print(network.shape)
Low_f8x = SeparableConv2D(48, (1, 1), padding='same')(x_down2)


def concat(listt):
    return K.concatenate(listt, axis=-1)


combine_1 = [Low_f8x, network]

network = Lambda(concat)(combine_1)

network = UpSampling2D(size=(2, 2), name='US2')(network)
network = Conv2D(64, (3, 3),
                 activation='relu',
                 padding='same',
                 strides=(1, 1),
                 name='up2')(network)  # closing args assumed from the 'up1' pattern; the source fragment ends mid-call
Example #6
x = Conv2D(8, (3, 3),
           strides=(1, 1),
           kernel_regularizer=regularization,
           use_bias=False)(img_input)  # opening of the call restored from the identical code in Example #3
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(8, (3, 3),
           strides=(1, 1),
           kernel_regularizer=regularization,
           use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)

# module 1
residual = Conv2D(16, (1, 1), strides=(2, 2), padding='same',
                  use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(16, (3, 3),
                    padding='same',
                    kernel_regularizer=regularization,
                    use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(16, (3, 3),
                    padding='same',
                    kernel_regularizer=regularization,
                    use_bias=False)(x)
x = BatchNormalization()(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = layers.add([x, residual])

# module 2
residual = Conv2D(32, (1, 1), strides=(2, 2), padding='same',
                  use_bias=False)(x)
residual = BatchNormalization()(residual)
Example #7
def getmodel(image_shape):
    #part 1
    img_input = Input(shape=image_shape)
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(64, (3, 3), strides=(2, 2), use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    #this part gets repeated 4 times
    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    #2
    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    #3
    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    #4
    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    x = Conv2D(64, (2, 2), strides=(2, 2), use_bias=False)(x)
    x = GlobalAveragePooling2D()(x)
    output = Dense(1, activation='sigmoid', name='predictions')(x)  # a 1-unit softmax is constant; sigmoid matches the binary_crossentropy loss
    model = Model(inputs=img_input, outputs=output)
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
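
A hypothetical call; any reasonable input size works since the network ends in global average pooling:

model = getmodel(image_shape=(256, 256, 3))
model.summary()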
Example #8
def Xception_model(include_top=False,
                   weights=None,
                   input_tensor=None,
                   input_shape=None,
                   pooling=None,
                   classes=1000,
                   attention_module=None,
                   **kwargs):
    """Instantiates the Xception architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.
    Note that the default input image size for this model is 299x299.
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 71.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True,
            and if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    global backend, layers, models, keras_utils
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=71,
                                      data_format=backend.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

    x = Convolution2D(32, (3, 3),
                      strides=(2, 2),
                      use_bias=False,
                      name='block1_conv1')(img_input)
    x = BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Convolution2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Convolution2D(128, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = BatchNormalization(axis=channel_axis)(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = add([x, residual])

    if attention_module is not None:
        x = attach_attention_module(x, attention_module)

    residual = Convolution2D(256, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = BatchNormalization(axis=channel_axis)(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    x = add([x, residual])

    if attention_module is not None:
        x = attach_attention_module(x, attention_module)

    residual = Convolution2D(728, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = BatchNormalization(axis=channel_axis)(residual)

    if attention_module is not None:
        residual = attach_attention_module(residual, attention_module)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(axis=channel_axis,
                               name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(axis=channel_axis,
                               name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(axis=channel_axis,
                               name=prefix + '_sepconv3_bn')(x)

        if attention_module is not None:
            x = attach_attention_module(x, attention_module)

        x = add([x, residual])

    residual = Convolution2D(1024, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = BatchNormalization(axis=channel_axis)(residual)

    if attention_module is not None:
        residual = attach_attention_module(residual, attention_module)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(axis=channel_axis, name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(axis=channel_axis, name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block13_pool')(x)
    x = add([x, residual])

    x = SeparableConv2D(1536, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv1')(x)
    x = BatchNormalization(axis=channel_axis, name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv2')(x)
    x = BatchNormalization(axis=channel_axis, name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.

    #x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer='uniform')(x)
    #x = Activation('relu')(x)
    weight_decay = 1e-4  # assumed value; not defined in the source fragment
    if len(backend.int_shape(x)) == 4:  # pool only if the include_top/pooling branches above did not
        x = GlobalAveragePooling2D()(x)
    outputs = Dense(classes,
                    activation='relu',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(weight_decay))(x)
    model = models.Model(inputs, outputs, name='xception')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            weights_path = keras_utils.get_file(
                'xception_weights_tf_dim_ordering_tf_kernels.h5',
                TF_WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
        else:
            weights_path = keras_utils.get_file(
                'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                TF_WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='b0042744bf5b25fce3cb969f33bebb97')
        model.load_weights(weights_path)
        if backend.backend() == 'theano':
            keras_utils.convert_all_kernels_in_model(model)
    elif weights is not None:
        model.load_weights(weights)

    return model
Example #9
def Xception(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')
    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')
    if K.backend() != 'tensorflow':
        raise RuntimeError('The Xception model is only available with '
                           'the TensorFlow backend.')
    if K.image_data_format() != 'channels_last':
        warnings.warn(
            'The Xception model is only available for the '
            'input data format "channels_last" '
            '(width, height, channels). '
            'However your settings specify the default '
            'data format "channels_first" (channels, width, height). '
            'You should set `image_data_format="channels_last"` in your Keras '
            'config located at ~/.keras/keras.json. '
            'The model being returned right now will expect inputs '
            'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=71,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False,
               name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)

    x = layers.add([x, residual])

    residual = Conv2D(728, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)
        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
        x = layers.add([x, residual])

    residual = Conv2D(1024, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block13_pool')(x)

    x = layers.add([x, residual])

    x = SeparableConv2D(1536, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)
    x = SeparableConv2D(2048, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)

    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='xception')

    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models')
        else:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,  # the no-top weights need their own URL constant (name assumed)
                cache_subdir='models')
        model.load_weights(weights_path)

    if old_data_format:
        K.set_image_data_format(old_data_format)

    return model
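
An example use of the function above as a feature extractor; the dummy batch is a placeholder and real inputs should be preprocessed to match the ImageNet weights:

import numpy as np

model = Xception(include_top=False, weights='imagenet', pooling='avg')
features = model.predict(np.zeros((1, 299, 299, 3)))  # shape (1, 2048)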
Example #10
# Model creation -----------------------------------------------------------
# Both direct training and transfer learning are carried out
# Experiments vary the configuration, e.g. adding FC or extra CNN layers
EfficientNetB0 = efn.EfficientNetB0(weights=None,
                                    include_top=False,
                                    input_shape=(224, 224, 3))
EfficientNetB0.load_weights(
    '../input/efficientnet-keras-weights-b0b5/efficientnet-b0_imagenet_1000_notop.h5'
)

inputs = Input(shape=(224, 224, 3))
x = EfficientNetB0(inputs)
residual = Conv2D(2048, (1, 1), strides=(2, 2), padding='same')(x)
x = SeparableConv2D(2048, (3, 3),
                    padding='same',
                    strides=1,
                    depth_multiplier=1,
                    depthwise_regularizer=l2(1e-15),
                    pointwise_regularizer=l2(1e-15))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SeparableConv2D(2048, (3, 3),
                    padding='same',
                    strides=1,
                    depth_multiplier=1,
                    depthwise_regularizer=l2(1e-15),
                    pointwise_regularizer=l2(1e-15))(x)
x = BatchNormalization()(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
x = Add()([x, residual])
residual1 = Conv2D(2304, (1, 1), strides=(2, 2), padding='same')(x)
x = SeparableConv2D(2304, (3, 3),
                    padding='same',
                    strides=1,
                    depth_multiplier=1,
                    depthwise_regularizer=l2(1e-15),
                    pointwise_regularizer=l2(1e-15))(x)  # completed from the repeated pattern above; the source fragment is truncated here
Example #11
def custom_model(learning_rate: float,
                 decay: float = 0,
                 dropout_rate: float = 0.3):
    model = Sequential()

    # YUV conversion and normalization
    model.add(
        Lambda(yuv_conversion, input_shape=(160, 320, 3), name='rgb_to_yuv'))
    model.add(Lambda(lambda x: x * 2 - 1, name='normalize'))

    # Crop the image to remove the sky and car hood
    model.add(
        Cropping2D(cropping=((CROP_TOP_BIG, CROP_BOTTOM_BIG), (0, 0)),
                   name='crop'))

    model.add(
        SeparableConv2D(24, (5, 5),
                        strides=(3, 3),
                        padding='valid',
                        depth_multiplier=16,
                        name='conv_1'))
    model.add(PReLU(name='conv_1_prelu'))

    model.add(
        SeparableConv2D(36, (5, 5),
                        strides=(3, 3),
                        padding='valid',
                        depth_multiplier=9,
                        name='conv_2'))
    model.add(PReLU(name='conv_2_prelu'))

    model.add(
        SeparableConv2D(48, (3, 3),
                        strides=(2, 2),
                        padding='valid',
                        depth_multiplier=6,
                        name='conv_3'))
    model.add(PReLU(name='conv_3_prelu'))

    model.add(
        SeparableConv2D(64, (2, 2),
                        strides=(2, 2),
                        padding='valid',
                        depth_multiplier=9,
                        name='conv_4'))
    model.add(PReLU(name='conv_4_prelu'))

    model.add(Flatten())
    model.add(Dropout(rate=dropout_rate))

    model.add(Dense(100, name='fc_1'))
    model.add(PReLU(name='fc_1_prelu'))

    model.add(Dense(50, name='fc_2'))
    model.add(PReLU(name='fc_2_prelu'))

    model.add(Dense(10, name='fc_3'))
    model.add(PReLU(name='fc_3_prelu'))

    model.add(Dense(1, name='angle'))

    adam = Adam(lr=learning_rate, decay=decay)
    model.compile(optimizer=adam, loss='mse')

    return model
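
A hypothetical call, assuming yuv_conversion, CROP_TOP_BIG and CROP_BOTTOM_BIG are defined elsewhere in the source module:

model = custom_model(learning_rate=1e-3, dropout_rate=0.3)
model.summary()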
Example #12
def unet(vis):
    input_size = (256, 256, 1)
    inputs = Input(input_size)

    #Encoder

    conv1_1 = SeparableConv2D(64, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(inputs)
    conv1_2 = SeparableConv2D(64, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv1_1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1_2)

    conv2_1 = SeparableConv2D(128, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(pool1)
    conv2_2 = SeparableConv2D(128, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv2_1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_2)

    conv3_1 = SeparableConv2D(256, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(pool2)
    conv3_2 = SeparableConv2D(256, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv3_1)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)

    conv4_1 = SeparableConv2D(512, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(pool3)
    conv4_2 = SeparableConv2D(512, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv4_1)
    drop4_2 = SpatialDropout2D(0.5)(conv4_2)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4_2)

    conv5_1 = SeparableConv2D(1024, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(pool4)
    conv5_2 = SeparableConv2D(1024, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv5_1)
    drop5_2 = SpatialDropout2D(0.5)(conv5_2)

    #Decoder

    up6_1 = Conv2DTranspose(512, (2, 2),
                            strides=(2, 2),
                            padding='same',
                            activation='relu',
                            kernel_initializer='he_normal')(drop5_2)
    up6_2 = concatenate([drop4_2, up6_1], axis=3)
    conv6_1 = SeparableConv2D(512, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(up6_2)
    conv6_2 = SeparableConv2D(512, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv6_1)

    up7_1 = Conv2DTranspose(256, (2, 2),
                            strides=(2, 2),
                            padding='same',
                            activation='relu',
                            kernel_initializer='he_normal')(conv6_2)
    up7_2 = concatenate([conv3_2, up7_1], axis=3)
    conv7_1 = SeparableConv2D(256, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(up7_2)
    conv7_2 = SeparableConv2D(256, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv7_1)

    up8_1 = Conv2DTranspose(128, (2, 2),
                            strides=(2, 2),
                            padding='same',
                            activation='relu',
                            kernel_initializer='he_normal')(conv7_2)
    up8_2 = concatenate([conv2_2, up8_1], axis=3)
    conv8_1 = SeparableConv2D(128, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(up8_2)
    conv8_2 = SeparableConv2D(128, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv8_1)

    up9_1 = Conv2DTranspose(64, (2, 2),
                            strides=(2, 2),
                            padding='same',
                            activation='relu',
                            kernel_initializer='he_normal')(conv8_2)
    up9_2 = concatenate([conv1_2, up9_1], axis=3)
    conv9_1 = SeparableConv2D(64, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(up9_2)
    conv9_2 = SeparableConv2D(64, (3, 3),
                              padding='same',
                              activation='relu',
                              kernel_initializer='he_normal')(conv9_1)
    conv9_3 = SeparableConv2D(2,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer='he_normal')(conv9_2)

    conv10 = SeparableConv2D(1, 1, activation='sigmoid')(conv9_3)

    if vis:
        outputs = [
            conv1_1, conv1_2, pool1, conv2_1, conv2_2, pool2, conv3_1, conv3_2,
            pool3, conv4_1, conv4_2, drop4_2, pool4, conv5_1, conv5_2, drop5_2,
            up6_1, up6_2, conv6_1, conv6_2, up7_1, up7_2, conv7_1, conv7_2,
            up8_1, up8_2, conv8_1, conv8_2, up9_1, up9_2, conv9_1, conv9_2,
            conv9_3, conv10
        ]

        model = Model(inputs=[inputs], outputs=outputs)
        model.summary()
    else:
        model = Model(inputs=[inputs], outputs=[conv10])
        model.summary()

    model.compile(optimizer=Adam(lr=1e-4),
                  loss=dice_coef_loss,
                  metrics=[dice_coef, 'accuracy'])

    return model
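
A hypothetical call; dice_coef and dice_coef_loss must be defined in the source module for the compile step to succeed:

model = unet(vis=False)
# model.fit(x_train, y_train, batch_size=4)  # x_train: (n, 256, 256, 1), masks in [0, 1]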
Example #13
def unet_1024(img_rows,
              img_cols,
              num_img_channels,
              num_mask_channels,
              train_batch_norm=True):

    inputs = Input(shape=(img_rows, img_cols, num_img_channels))
    # 1024

    with K.name_scope('Down512'):
        down0b = Conv2D(8, (3, 3), padding='same')(inputs)
        down0b = BatchNormalization()(down0b, training=train_batch_norm)
        down0b = Activation('relu')(down0b)
        down0b = Conv2D(8, (3, 3), padding='same')(down0b)
        down0b = BatchNormalization()(down0b, training=train_batch_norm)
        down0b = Activation('relu')(down0b)
        down0b_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0b)
    # 512

    with K.name_scope('Down256'):
        down0a = SeparableConv2D(16, (3, 3), padding='same')(down0b_pool)
        down0a = BatchNormalization()(down0a, training=train_batch_norm)
        down0a = Activation('relu')(down0a)
        down0a = SeparableConv2D(16, (3, 3), padding='same')(down0a)
        down0a = BatchNormalization()(down0a, training=train_batch_norm)
        down0a = Activation('relu')(down0a)
        down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
    # 256

    with K.name_scope('Down128'):
        down0 = SeparableConv2D(32, (3, 3), padding='same')(down0a_pool)
        down0 = BatchNormalization()(down0, training=train_batch_norm)
        down0 = Activation('relu')(down0)
        down0 = SeparableConv2D(32, (3, 3), padding='same')(down0)
        down0 = BatchNormalization()(down0, training=train_batch_norm)
        down0 = Activation('relu')(down0)
        down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
    # 128

    with K.name_scope('Down64'):
        down1 = SeparableConv2D(64, (3, 3), padding='same')(down0_pool)
        down1 = BatchNormalization()(down1, training=train_batch_norm)
        down1 = Activation('relu')(down1)
        down1 = SeparableConv2D(64, (3, 3), padding='same')(down1)
        down1 = BatchNormalization()(down1, training=train_batch_norm)
        down1 = Activation('relu')(down1)
        down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    # 64

    with K.name_scope('Down32'):
        down2 = SeparableConv2D(128, (3, 3), padding='same')(down1_pool)
        down2 = BatchNormalization()(down2, training=train_batch_norm)
        down2 = Activation('relu')(down2)
        down2 = SeparableConv2D(128, (3, 3), padding='same')(down2)
        down2 = BatchNormalization()(down2, training=train_batch_norm)
        down2 = Activation('relu')(down2)
        down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
    # 32

    with K.name_scope('Down16'):
        down3 = SeparableConv2D(256, (3, 3), padding='same')(down2_pool)
        down3 = BatchNormalization()(down3, training=train_batch_norm)
        down3 = Activation('relu')(down3)
        down3 = SeparableConv2D(256, (3, 3), padding='same')(down3)
        down3 = BatchNormalization()(down3, training=train_batch_norm)
        down3 = Activation('relu')(down3)
        down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
    # 16

    with K.name_scope('Down8'):
        down4 = SeparableConv2D(512, (3, 3), padding='same')(down3_pool)
        down4 = BatchNormalization()(down4, training=train_batch_norm)
        down4 = Activation('relu')(down4)
        down4 = SeparableConv2D(512, (3, 3), padding='same')(down4)
        down4 = BatchNormalization()(down4, training=train_batch_norm)
        down4 = Activation('relu')(down4)
        down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
    # 8

    with K.name_scope('Center'):
        center = SeparableConv2D(1024, (3, 3), padding='same')(down4_pool)
        center = BatchNormalization()(center, training=train_batch_norm)
        center = Activation('relu')(center)
        center = SeparableConv2D(1024, (3, 3), padding='same')(center)
        center = BatchNormalization()(center, training=train_batch_norm)
        center = Activation('relu')(center)
    # center

    with K.name_scope('Up16'):
        up4 = UpSampling2D((2, 2))(center)
        up4 = concatenate([down4, up4], axis=3)
        up4 = Conv2D(512, (3, 3), padding='same')(up4)
        up4 = BatchNormalization()(up4, training=train_batch_norm)
        up4 = Activation('relu')(up4)
        up4 = Conv2D(512, (3, 3), padding='same')(up4)
        up4 = BatchNormalization()(up4, training=train_batch_norm)
        up4 = Activation('relu')(up4)
    # 16

    with K.name_scope('Up32'):
        up3 = UpSampling2D((2, 2))(up4)
        up3 = concatenate([down3, up3], axis=3)
        up3 = Conv2D(256, (3, 3), padding='same')(up3)
        up3 = BatchNormalization()(up3, training=train_batch_norm)
        up3 = Activation('relu')(up3)
        up3 = Conv2D(256, (3, 3), padding='same')(up3)
        up3 = BatchNormalization()(up3, training=train_batch_norm)
        up3 = Activation('relu')(up3)
    # 32

    with K.name_scope('Up64'):
        up2 = UpSampling2D((2, 2))(up3)
        up2 = concatenate([down2, up2], axis=3)
        up2 = Conv2D(128, (3, 3), padding='same')(up2)
        up2 = BatchNormalization()(up2, training=train_batch_norm)
        up2 = Activation('relu')(up2)
        up2 = Conv2D(128, (3, 3), padding='same')(up2)
        up2 = BatchNormalization()(up2, training=train_batch_norm)
        up2 = Activation('relu')(up2)
    # 64

    with K.name_scope('Up128'):
        up1 = UpSampling2D((2, 2))(up2)
        up1 = concatenate([down1, up1], axis=3)
        up1 = Conv2D(64, (3, 3), padding='same')(up1)
        up1 = BatchNormalization()(up1, training=train_batch_norm)
        up1 = Activation('relu')(up1)
        up1 = Conv2D(64, (3, 3), padding='same')(up1)
        up1 = BatchNormalization()(up1, training=train_batch_norm)
        up1 = Activation('relu')(up1)
    # 128

    with K.name_scope('Up256'):
        up0 = UpSampling2D((2, 2))(up1)
        up0 = concatenate([down0, up0], axis=3)
        up0 = Conv2D(32, (3, 3), padding='same')(up0)
        up0 = BatchNormalization()(up0, training=train_batch_norm)
        up0 = Activation('relu')(up0)
        up0 = Conv2D(32, (3, 3), padding='same')(up0)
        up0 = BatchNormalization()(up0, training=train_batch_norm)
        up0 = Activation('relu')(up0)
    # 256

    with K.name_scope('Up512'):
        up0a = UpSampling2D((2, 2))(up0)
        up0a = concatenate([down0a, up0a], axis=3)
        up0a = Conv2D(16, (3, 3), padding='same')(up0a)
        up0a = BatchNormalization()(up0a, training=train_batch_norm)
        up0a = Activation('relu')(up0a)
        up0a = Conv2D(16, (3, 3), padding='same')(up0a)
        up0a = BatchNormalization()(up0a, training=train_batch_norm)
        up0a = Activation('relu')(up0a)
    # 512

    with K.name_scope('Up1024'):
        up0b = UpSampling2D((2, 2))(up0a)
        up0b = concatenate([down0b, up0b], axis=3)
        up0b = Conv2D(8, (3, 3), padding='same')(up0b)
        up0b = BatchNormalization()(up0b, training=train_batch_norm)
        up0b = Activation('relu')(up0b)
        up0b = Conv2D(8, (3, 3), padding='same')(up0b)
        up0b = BatchNormalization()(up0b, training=train_batch_norm)
        up0b = Activation('relu')(up0b)
    # 1024

    classify = Conv2D(num_mask_channels, (1, 1), activation='sigmoid')(up0b)
    model = Model(inputs=inputs, outputs=classify)

    return model
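
Each Up* scope above repeats the same three-step decoder pattern: upsample, concatenate the matching encoder skip, then two Conv2D + BatchNormalization + ReLU stages. A minimal sketch of that pattern as a helper, under the same channels-last assumption as the model above (the helper name is illustrative, not from the original source):

# Sketch of the decoder step repeated above; `train_batch_norm` mirrors the
# flag used by the original model.
from keras.layers import (Activation, BatchNormalization, Conv2D,
                          UpSampling2D, concatenate)

def up_block(x, skip, filters, train_batch_norm=False):
    x = UpSampling2D((2, 2))(x)
    x = concatenate([skip, x], axis=3)   # channels-last skip connection
    for _ in range(2):
        x = Conv2D(filters, (3, 3), padding='same')(x)
        x = BatchNormalization()(x, training=train_batch_norm)
        x = Activation('relu')(x)
    return x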
Example #14
0
def nn_base(input_tensor=None, trainable=False):

    # Determine proper input shape
    if K.image_dim_ordering() == 'th':
        input_shape = (3, None, None)
    else:
        input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False,
               name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    x = add([x, residual])

    residual = Conv2D(728, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = add([x, residual])

    residual = Conv2D(1024, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block13_pool')(x)
    x = add([x, residual])

    return x
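
The entry-flow blocks in nn_base above all follow one reduction pattern: a strided 1x1 projection shortcut added to two separable convolutions plus max pooling. A hedged sketch of that pattern as a helper (names are illustrative; `relu_first=False` reproduces block2, which has no leading activation):

from keras.layers import (Activation, BatchNormalization, Conv2D,
                          MaxPooling2D, SeparableConv2D, add)

def xception_reduction_block(x, filters, prefix, relu_first=True):
    # Strided 1x1 projection so the shortcut matches the pooled main path.
    residual = Conv2D(filters, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    if relu_first:
        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
    x = SeparableConv2D(filters, (3, 3), padding='same', use_bias=False,
                        name=prefix + '_sepconv1')(x)
    x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
    x = Activation('relu', name=prefix + '_sepconv2_act')(x)
    x = SeparableConv2D(filters, (3, 3), padding='same', use_bias=False,
                        name=prefix + '_sepconv2')(x)
    x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same',
                     name=prefix + '_pool')(x)
    return add([x, residual])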
Example #15
0
    input_shape = (patch_size, patch_size, 3)

    train, test, valid = load_data_to_labels(label_path,
                                             precision,
                                             train_fraction=0.7,
                                             test_fraction=0.15)

    train_len = len(train)
    test_len = len(test)
    valid_len = len(valid)
    results.write('Input data: train_len: ' + str(train_len) + ", test_len: " +
                  str(test_len) + ", valid_len: " + str(valid_len) + lbreak)

    model = Sequential()
    model.add(
        SeparableConv2D(32, 3, activation='relu', input_shape=input_shape))
    model.add(SeparableConv2D(64, 3, activation='relu'))
    model.add(Dropout(0.5))
    #model.add(BatchNormalization())
    model.add(MaxPooling2D(2))
    model.add(SeparableConv2D(64, 3, activation='relu'))
    model.add(SeparableConv2D(128, 3, activation='relu'))
    model.add(Dropout(0.5))
    #model.add(BatchNormalization())
    model.add(MaxPooling2D(2))
    model.add(SeparableConv2D(64, 3, activation='relu'))
    model.add(SeparableConv2D(128, 3, activation='relu'))
    model.add(Dropout(0.5))
    #model.add(BatchNormalization())
    model.add(MaxPooling2D(2))
    #model.add(SeparableConv2D(64, 3, activation='relu'))
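
The stack above favors SeparableConv2D over Conv2D mainly for the parameter savings. A standalone sketch of the difference (shapes are illustrative, not taken from the model above):

from keras.layers import Conv2D, Input, SeparableConv2D
from keras.models import Model

inp = Input((64, 64, 32))
standard = Model(inp, Conv2D(64, 3)(inp))
separable = Model(inp, SeparableConv2D(64, 3)(inp))
# Standard:  3*3*32*64 + 64 bias                      = 18,496 parameters.
# Separable: depthwise 3*3*32 + pointwise 32*64 + 64  =  2,400 parameters.
print(standard.count_params(), separable.count_params())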
Example #16
0
def create_model():

    train_gen = ImageDataGenerator()
    valid_gen = ImageDataGenerator()
    train_generator = train_gen.flow_from_directory(TRAIN_DATA_PATH,
                                                    IMAGE_SIZE,
                                                    shuffle=True,
                                                    batch_size=BATCH_SIZE,
                                                    color_mode='grayscale')
    valid_generator = valid_gen.flow_from_directory(VALID_DATA_PATH,
                                                    IMAGE_SIZE,
                                                    batch_size=BATCH_SIZE,
                                                    color_mode='grayscale')

    inputs = Input((*IMAGE_SIZE, 1))
    x_input = Lambda(my_preprocess)(inputs)

    # block1
    x = Conv2D(64, (3, 3),
               input_shape=(*IMAGE_SIZE, 1),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block1_conv1')(x_input)
    x = Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block1_conv2')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding='same',
                     name='block1_pool')(x)

    # block2
    x = Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block2_conv1')(x)
    x = Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block2_conv2')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)

    # block3
    x = Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block3_conv1')(x)
    x = Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block3_conv2')(x)
    x = Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block3_conv3')(x)
    x = MaxPooling2D((2, 2),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)

    # side1
    x_side1 = SeparableConv2D(512, (3, 3),
                              padding='same',
                              use_bias=False,
                              name='side1_sepconv1')(x)
    x_side1 = BatchNormalization(name='side1_bn1')(x_side1)
    x_side1 = Activation('relu', name='side1_act1')(x_side1)
    x_side1 = SeparableConv2D(512, (3, 3),
                              padding='same',
                              use_bias=False,
                              name='side1_sepconv2')(x_side1)
    x_side1 = BatchNormalization(name='side1_bn2')(x_side1)
    x_side1 = Activation('relu', name='side1_act2')(x_side1)
    x_side1 = MaxPooling2D((2, 2),
                           strides=(2, 2),
                           padding='same',
                           name='side1_pool')(x_side1)
    x_side1 = SeparableConv2D(728, (3, 3),
                              padding='same',
                              use_bias=False,
                              name='side1_sepconv3')(x_side1)
    x_side1 = BatchNormalization(name='side1_bn3')(x_side1)
    x_side1 = Activation('relu', name='side1_act3')(x_side1)
    x_side1 = SeparableConv2D(728, (3, 3),
                              padding='same',
                              use_bias=False,
                              activation='relu',
                              name='side1_sepconv4')(x_side1)
    x_side1 = GlobalAveragePooling2D(name='side1_gap')(x_side1)

    # side2
    x_side2_1_1 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_1_conv1')(x)
    x_side2_1_2 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_2_conv1')(x)
    x_side2_1_2 = Conv2D(256, (3, 3),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_2_conv2')(x_side2_1_2)
    x_side2_1_3 = Conv2D(256, (3, 3),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_3_conv1')(x)
    x_side2_1_3 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side2_3_conv2')(x_side2_1_3)
    x_side2_1 = keras.layers.concatenate(
        [x_side2_1_1, x_side2_1_2, x_side2_1_3])

    x_side2_2_1 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_1_conv1')(x_side2_1)
    x_side2_2_2 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_2_conv1')(x_side2_1)
    x_side2_2_2 = Conv2D(256, (3, 3),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_2_conv2')(x_side2_2_2)
    x_side2_2_3 = Conv2D(256, (3, 3),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_3_conv1')(x_side2_1)
    x_side2_2_3 = Conv2D(256, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         activation='relu',
                         name='side3_3_conv2')(x_side2_2_3)

    x_side2_2 = keras.layers.concatenate(
        [x_side2_2_1, x_side2_2_2, x_side2_2_3])
    x_side2 = GlobalAveragePooling2D(name='side2_gap')(x_side2_2)

    # block4
    x = Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block4_conv1')(x)
    x = Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block4_conv2')(x)
    x = Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               name='block4_conv3')(x)

    x = GlobalAveragePooling2D(name='gap')(x)

    x = keras.layers.concatenate([x, x_side1, x_side2])

    x = Dropout(DROP_RATE, name='dropout1')(x)
    predictions = Dense(CLASS_NUM, activation='softmax', name='dense1')(x)
    model = Model(inputs=inputs, outputs=predictions)
    model.summary()
    plot_model(model,
               to_file=os.path.join(RESULT_PATH, 'my_model.png'),
               show_shapes=True)

    check_point = ModelCheckpoint(monitor='val_loss',
                                  filepath=os.path.join(
                                      MODEL_PATH, MODEL_NAME),
                                  verbose=1,
                                  save_best_only=True,
                                  save_weights_only=False,
                                  mode='auto')

    # early stopping
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=EARLY_STOPPING_PATIENCE,
                                   verbose=0,
                                   mode='auto')

    # reduce lr
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=REDUCE_LR_PATIENCE,
                                  verbose=0,
                                  mode='auto',
                                  epsilon=0.0001,
                                  cooldown=0,
                                  min_lr=0)

    # Create a LossHistory instance
    history = LossHistory()

    # compile
    model.compile(optimizer=adam(lr=LEARNING_RATE),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # fit
    model.fit_generator(train_generator,
                        steps_per_epoch=train_generator.samples // BATCH_SIZE,
                        epochs=EPOCHS,
                        validation_data=valid_generator,
                        validation_steps=valid_generator.samples // BATCH_SIZE,
                        callbacks=[check_point, early_stopping, reduce_lr,
                                   history])

    # Plot the loss and accuracy curves, per batch and per epoch
    history.loss_plot('batch', os.path.join(RESULT_PATH, 'my_loss_batch.png'))
    history.acc_plot('batch', os.path.join(RESULT_PATH, 'my_batch.png'))
    history.loss_plot('epoch', os.path.join(RESULT_PATH, 'my_loss_epoch.png'))
    history.acc_plot('epoch', os.path.join(RESULT_PATH, 'my_acc_epoch.png'))
    K.clear_session()
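
The side2 head above is an Inception-style module: parallel 1x1, 1x1-then-3x3, and 3x3-then-1x1 paths over the same input, merged on the channel axis. A minimal sketch of that pattern (an assumed simplification, not the author's exact code):

import keras
from keras.layers import Conv2D

def inception_branch(x, filters, prefix):
    b1 = Conv2D(filters, (1, 1), padding='same', activation='relu',
                name=prefix + '_1_conv1')(x)
    b2 = Conv2D(filters, (1, 1), padding='same', activation='relu',
                name=prefix + '_2_conv1')(x)
    b2 = Conv2D(filters, (3, 3), padding='same', activation='relu',
                name=prefix + '_2_conv2')(b2)
    b3 = Conv2D(filters, (3, 3), padding='same', activation='relu',
                name=prefix + '_3_conv1')(x)
    b3 = Conv2D(filters, (1, 1), padding='same', activation='relu',
                name=prefix + '_3_conv2')(b3)
    # Concatenate along the channel axis, as side2 does above.
    return keras.layers.concatenate([b1, b2, b3])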
Example #17
0
def Xception(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000,
             factor=8):
    """Instantiates the Xception architecture.

    Optionally loads weights pre-trained
    on ImageNet. This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    data format `(width, height, channels)`.
    You should set `image_data_format="channels_last"` in your Keras config
    located at ~/.keras/keras.json.

    Note that the default input image size for this model is 299x299.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 71.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    if K.backend() != 'tensorflow':
        raise RuntimeError('The Xception model is only available with '
                           'the TensorFlow backend.')
    if K.image_data_format() != 'channels_last':
        warnings.warn(
            'The Xception model is only available for the '
            'input data format "channels_last" '
            '(width, height, channels). '
            'However your settings specify the default '
            'data format "channels_first" (channels, width, height). '
            'You should set `image_data_format="channels_last"` in your Keras '
            'config located at ~/.keras/keras.json. '
            'The model being returned right now will expect inputs '
            'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    # Use the input shape exactly as provided by the caller.

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Conv2D(32 // factor, (3, 3),
               strides=(2, 2),
               use_bias=False,
               name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64 // factor, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128 // factor, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128 // factor, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128 // factor, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = add([x, residual])

    residual = Conv2D(256 // factor, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256 // factor, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256 // factor, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    x = add([x, residual])

    residual = Conv2D(728 // factor, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728 // factor, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728 // factor, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = add([x, residual])

    # for i in range(8):
    #     residual = x
    #     prefix = 'block' + str(i + 5)
    #
    #     x = Activation('relu', name=prefix + '_sepconv1_act')(x)
    #     x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x)
    #     x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
    #     x = Activation('relu', name=prefix + '_sepconv2_act')(x)
    #     x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x)
    #     x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
    #     x = Activation('relu', name=prefix + '_sepconv3_act')(x)
    #     x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
    #     x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
    #
    #     x = add([x, residual])
    #
    # residual = Conv2D(1024, (1, 1), strides=(2, 2),
    #                   padding='same', use_bias=False)(x)
    # residual = BatchNormalization()(residual)
    #
    # x = Activation('relu', name='block13_sepconv1_act')(x)
    # x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
    # x = BatchNormalization(name='block13_sepconv1_bn')(x)
    # x = Activation('relu', name='block13_sepconv2_act')(x)
    # x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
    # x = BatchNormalization(name='block13_sepconv2_bn')(x)
    #
    # x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
    # x = add([x, residual])
    #
    # x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
    # x = BatchNormalization(name='block14_sepconv1_bn')(x)
    # x = Activation('relu', name='block14_sepconv1_act')(x)
    #
    # x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
    # x = BatchNormalization(name='block14_sepconv2_bn')(x)
    # x = Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='xception')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels.h5',
                TF_WEIGHTS_PATH,
                cache_subdir='models')
        else:
            weights_path = get_file(
                'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                TF_WEIGHTS_PATH_NO_TOP,
                cache_subdir='models')
        model.load_weights(weights_path)

    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
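
A hypothetical call of the width-reduced variant above. Note that the published ImageNet weights match the original layer widths, so `weights='imagenet'` can only load when `factor=1`; reduced variants have to start from random initialization:

# Hypothetical usage of the reduced Xception above.
small_model = Xception(include_top=False,
                       weights=None,          # imagenet weights need factor=1
                       input_shape=(299, 299, 3),
                       pooling='avg',
                       factor=8)
small_model.summary()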
Example #18
0
    def ReductionCell(hi,
                      h_i1sub,
                      filter_i,
                      filter_o,
                      stride=2,
                      name="NAS",
                      is_tail=False):
        """
        adjust feature size & channel size
        """
        h_i1sub = _adjust_block(h_i1sub, hi, filter_o, 0, name)

        hi = Conv2D(filter_o, (1, 1),
                    padding='same',
                    name='%s_hi_align' % name,
                    trainable=trainable,
                    kernel_regularizer=OrthLocalReg2D)(hi)
        hi = Activation('relu', name='%s_hi_align_relu' % name)(hi)
        hi = BatchNormalization(name='%s_hi_align_bn' % name)(hi)
        """
        SubLayer 1
        """
        l = SeparableConv2D(filter_o, (5, 5),
                            strides=(stride, stride),
                            padding='same',
                            name='%s_sep5x5_1' % name,
                            trainable=trainable,
                            depthwise_regularizer=OrthLocalRegSep2D)(hi)
        l = Activation('relu', name='%s_sep5x5_1_relu' % name)(l)
        l = BatchNormalization(name='%s_sep5x5_1_bn' % name)(l)

        if h_i1sub is not None:
            l_1sub = SeparableConv2D(
                filter_o, (7, 7),
                strides=(stride, stride),
                padding='same',
                name='%s_sep7x7_sub_1' % name,
                trainable=trainable,
                depthwise_regularizer=OrthLocalRegSep2D)(h_i1sub)
            l_1sub = Activation('relu',
                                name='%s_sep7x7_sub_1_relu' % name)(l_1sub)
            l_1sub = BatchNormalization(name='%s_sep7x7_sub_1_bn' %
                                        name)(l_1sub)
            add1 = Add()([l, l_1sub])
        else:
            add1 = l

        l = SeparableConv2D(filter_o, (3, 3),
                            strides=(stride, stride),
                            padding='same',
                            name='%s_sep3x3_1' % name,
                            trainable=trainable,
                            depthwise_regularizer=OrthLocalRegSep2D)(hi)
        l = Activation('relu', name='%s_sep3x3_1_relu' % name)(l)
        l = BatchNormalization(name='%s_sep3x3_1_bn' % name)(l)

        if h_i1sub is not None:
            l_1sub = SeparableConv2D(
                filter_o, (7, 7),
                strides=(stride, stride),
                padding='same',
                name='%s_sep7x7_sub_2' % name,
                trainable=trainable,
                depthwise_regularizer=OrthLocalRegSep2D)(h_i1sub)
            l_1sub = Activation('relu',
                                name='%s_sep7x7_sub_2_relu' % name)(l_1sub)
            l_1sub = BatchNormalization(name='%s_sep7x7_sub_2_bn' %
                                        name)(l_1sub)
            add2 = Add()([l, l_1sub])
        else:
            add2 = l

        l = AveragePooling2D(pool_size=(3, 3),
                             strides=(stride, stride),
                             padding='same')(hi)
        if filter_i != filter_o:
            l = Conv2D(filter_o, (1, 1),
                       strides=(1, 1),
                       padding='same',
                       name='%s_sep1x1_align' % name,
                       trainable=trainable)(l)
            l = Activation('relu', name='%s_sep1x1_align_relu' % name)(l)
            l = BatchNormalization(name='%s_sep1x1_align_bn' % name)(l)
        if h_i1sub is not None:
            l_1sub = SeparableConv2D(
                filter_o, (5, 5),
                strides=(stride, stride),
                padding='same',
                name='%s_sep5x5_sub_1' % name,
                trainable=trainable,
                depthwise_regularizer=OrthLocalRegSep2D)(h_i1sub)
            l_1sub = Activation('relu',
                                name='%s_sep5x5_sub_1_relu' % name)(l_1sub)
            l_1sub = BatchNormalization(name='%s_sep5x5_sub_1_bn' %
                                        name)(l_1sub)
            add3 = Add()([l, l_1sub])
        else:
            add3 = l
        """
        SubLayer 2
        """
        l = MaxPooling2D(pool_size=(3, 3),
                         strides=(stride, stride),
                         padding='same')(hi)
        if filter_i != filter_o:
            l = Conv2D(filter_o, (1, 1),
                       strides=(1, 1),
                       padding='same',
                       name='%s_sep1x1_align_2' % name,
                       trainable=trainable)(l)
            l = Activation('relu', name='%s_sep1x1_align_2_relu' % name)(l)
            l = BatchNormalization(name='%s_sep1x1_align_2_bn' % name)(l)
        l_1sub = SeparableConv2D(filter_o, (3, 3),
                                 padding='same',
                                 name='%s_sep3x3_2' % name,
                                 trainable=trainable,
                                 depthwise_regularizer=OrthLocalRegSep2D)(add1)
        l_1sub = Activation('relu', name='%s_sep3x3_2_relu' % name)(l_1sub)
        l_1sub = BatchNormalization(name='%s_sep3x3_2_bn' % name)(l_1sub)
        add4 = Add()([l, l_1sub])

        l = AveragePooling2D(pool_size=(3, 3), strides=(1, 1),
                             padding='same')(add1)
        add5 = Add()([l, add2])

        result = concatenate([add3, add4, add5])

        if is_tail:
            result = Conv2D(filter_o, (1, 1),
                            strides=(1, 1),
                            padding='same',
                            name='%s_result1x1_align' % name,
                            trainable=trainable,
                            kernel_regularizer=OrthLocalReg2D)(result)
            result = Activation('relu',
                                name='%s_result1x1_align_relu' % name)(result)
            result = BatchNormalization(name='%s_result1x1_align_bn' %
                                        name)(result)
        return result
Example #19
0
 def sepconv(n, f, k, s=1):
     """ Depthwise separable convolution layer """
     # `acti` is the activation name defined in the enclosing scope.
     n = SeparableConv2D(f, k, padding='same', activation=acti,
                         strides=s)(n)
     return n
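
A possible usage of the helper, treating sepconv as a free function for illustration and supplying the `acti` name it expects (shapes are illustrative):

from keras.layers import Input

acti = 'relu'                      # activation assumed by sepconv above
n = Input((128, 128, 3))
n = sepconv(n, 64, 3)              # 128x128, 64 channels
n = sepconv(n, 64, 3)
n = sepconv(n, 128, 3, s=2)        # stride 2 halves the spatial size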
Example #20
0
    def NormalCell(hi,
                   h_i1sub,
                   filter_i,
                   filter_o,
                   stride=1,
                   name="NAS",
                   use_deform=False,
                   is_tail=False):
        """
        adjust feature size & channel size
        """

        hi = Conv2D(filter_o, (1, 1),
                    padding='same',
                    name='%s_hi_align' % name,
                    trainable=trainable,
                    kernel_regularizer=OrthLocalReg2D)(hi)
        hi = Activation('relu', name='%s_hi_align_relu' % name)(hi)
        hi = BatchNormalization(name='%s_hi_align_bn' % name)(hi)

        if use_deform:
            hi = SepConvOffset2D(filter_o,
                                 name='%s_deform_conv' % name)(hi,
                                                               use_resam=True)

        h_i1sub = _adjust_block(h_i1sub, hi, filter_o, 0, name)
        """
        SubLayer
        """

        l = SeparableConv2D(filter_o, (3, 3),
                            padding='same',
                            name='%s_sep3x3_1' % name,
                            trainable=trainable,
                            depthwise_regularizer=OrthLocalRegSep2D)(hi)
        l = Activation('relu', name='%s_sep3x3_1_relu' % name)(l)
        l = BatchNormalization(name='%s_sep3x3_1_bn' % name)(l)

        add1 = Add()([l, hi])

        l = SeparableConv2D(filter_o, (5, 5),
                            padding='same',
                            name='%s_sep5x5_1' % name,
                            trainable=trainable,
                            depthwise_regularizer=OrthLocalRegSep2D)(hi)
        l = Activation('relu', name='%s_sep5x5_1_relu' % name)(l)
        l = BatchNormalization(name='%s_sep5x5_1_bn' % name)(l)

        if h_i1sub is not None:
            l_1sub = SeparableConv2D(
                filter_o, (3, 3),
                padding='same',
                name='%s_sep3x3_sub_1' % name,
                trainable=trainable,
                depthwise_regularizer=OrthLocalRegSep2D)(h_i1sub)
            l_1sub = Activation('relu',
                                name='%s_sep3x3_sub_1_relu' % name)(l_1sub)
            l_1sub = BatchNormalization(name='%s_sep3x3_sub_1_bn' %
                                        name)(l_1sub)
            add2 = Add()([l, l_1sub])
        else:
            add2 = l

        l = SeparableConv2D(filter_o, (3, 3),
                            padding='same',
                            name='%s_sep3x3_2' % name,
                            trainable=trainable,
                            depthwise_regularizer=OrthLocalRegSep2D)(hi)
        l = Activation('relu', name='%s_sep3x3_2_relu' % name)(l)
        l = BatchNormalization(name='%s_sep3x3_2_bn' % name)(l)

        if h_i1sub is not None:
            l_1sub = SeparableConv2D(
                filter_o, (5, 5),
                padding='same',
                name='%s_sep5x5_sub_1' % name,
                trainable=trainable,
                depthwise_regularizer=OrthLocalRegSep2D)(h_i1sub)
            l_1sub = Activation('relu',
                                name='%s_sep5x5_sub_1_relu' % name)(l_1sub)
            l_1sub = BatchNormalization(name='%s_sep5x5_sub_1_bn' %
                                        name)(l_1sub)
            add3 = Add()([l, l_1sub])
        else:
            add3 = l

        if h_i1sub is not None:
            avg_p1 = AveragePooling2D(pool_size=(3, 3),
                                      strides=(stride, stride),
                                      padding='same')(h_i1sub)
            # avg_p2 = AveragePooling2D(pool_size=(3, 3), strides=(stride, stride), padding='same')(h_i1sub)
            # add4 = Add()([avg_p1, avg_p2])
            add4 = avg_p1

            l_1sub_1 = SeparableConv2D(
                filter_o, (3, 3),
                padding='same',
                name='%s_sep3x3_sub_2' % name,
                trainable=trainable,
                depthwise_regularizer=OrthLocalRegSep2D)(h_i1sub)
            l_1sub_1 = Activation('relu',
                                  name='%s_sep3x3_sub_2_relu' % name)(l_1sub_1)
            l_1sub_1 = BatchNormalization(name='%s_sep3x3_sub_2_bn' %
                                          name)(l_1sub_1)
            l_1sub_2 = SeparableConv2D(
                filter_o, (5, 5),
                padding='same',
                name='%s_sep5x5_sub_2' % name,
                trainable=trainable,
                depthwise_regularizer=OrthLocalRegSep2D)(h_i1sub)
            l_1sub_2 = Activation('relu',
                                  name='%s_sep5x5_sub_2_relu' % name)(l_1sub_2)
            l_1sub_2 = BatchNormalization(name='%s_sep5x5_sub_2_bn' %
                                          name)(l_1sub_2)
            add5 = Add()([l_1sub_1, l_1sub_2])

            result = concatenate([add1, add2, add3, add4, add5])

            if is_tail:
                result = Conv2D(filter_o, (1, 1),
                                strides=(1, 1),
                                padding='same',
                                name='%s_result1x1_align' % name,
                                trainable=trainable,
                                kernel_regularizer=OrthLocalReg2D)(result)
                result = Activation('relu',
                                    name='%s_result1x1_align_relu' %
                                    name)(result)
                result = BatchNormalization(name='%s_result1x1_align_bn' %
                                            name)(result)
            return result
        else:
            result = concatenate([add1, add2, add3])

            if is_tail:
                result = Conv2D(filter_o, (1, 1),
                                strides=(1, 1),
                                padding='same',
                                name='%s_result1x1_align' % name,
                                trainable=trainable,
                                kernel_regularizer=OrthLocalReg2D)(result)
                result = Activation('relu',
                                    name='%s_result1x1_align_relu' %
                                    name)(result)
                result = BatchNormalization(name='%s_result1x1_align_bn' %
                                            name)(result)
            return result
Example #21
0
def xception_base_network(input, padding=False):
    weights_path = get_file('xception_weights.h5',
                            TF_WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models')
    #x = Lambda(lambda x: x*2-1)(img_input)
    blocks = []
    x = Conv2D(32, (3, 3), padding='same', use_bias=False,
               name='block1_conv1')(input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    blocks.insert(0, x)
    x = Conv2D(64, (3, 3),
               strides=(2, 2),
               use_bias=False,
               name='block1_conv2',
               padding='same')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)
    blocks.insert(0, x)
    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    blocks.insert(0, x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    blocks.insert(0, x)
    x = layers.add([x, residual])

    residual = Conv2D(728, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)
    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    blocks.insert(0, x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
        x = layers.add([x, residual])

    residual = Conv2D(1024, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block13_pool')(x)
    blocks.insert(0, x)
    x = layers.add([x, residual])

    x = SeparableConv2D(1536, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    x = GlobalAveragePooling2D()(x)

    model = Model(input, x, name='xception')

    model.load_weights(weights_path)
    return model, x, blocks
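
A usage sketch for the return value above (input size is hypothetical): every feature map is added with blocks.insert(0, x), so the list ends up ordered coarsest-first, which is the usual skip-connection set for a U-Net or FPN style decoder:

from keras.layers import Input
import keras.backend as K

inp = Input((512, 512, 3))
model, top_features, blocks = xception_base_network(inp)
for b in blocks:
    print(K.int_shape(b))   # (None, H, W, C), coarsest to finest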
Example #22
0
def define_model(input_shape, num_classes, regularization):
    # base
    img_input = Input(input_shape)
    x = Conv2D(filters=8,
               kernel_size=(3, 3),
               strides=(1, 1),
               kernel_regularizer=regularization,
               use_bias=False)(img_input)
    # With 'valid' padding, output size = input_size - (filter_size - 1) = 48 - 2 = 46.
    # To keep the output the same size as the input, pad it:
    # input_size + 2 * padding_size - (filter_size - 1) = 48 + (2 * 1) - 2 = 48.
    # To downsample explicitly during the convolution, use a stride > 1 instead.
    # Parameters learned by a conv layer = (kernel_h * kernel_w * input_channels + 1) * num_filters;
    # the +1 is the bias term, absent here because use_bias=False.
    # Dropout randomly zeroes a fraction of activations during training to reduce overfitting.

    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=8,
               kernel_size=(3, 3),
               strides=(1, 1),
               kernel_regularizer=regularization,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # module 1
    residual = Conv2D(filters=16,
                      kernel_size=(1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = SeparableConv2D(filters=16,
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filters=16,
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 2
    residual = Conv2D(filters=32,
                      kernel_size=(1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = SeparableConv2D(filters=32,
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filters=32,
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 3
    residual = Conv2D(filters=64,
                      kernel_size=(1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = SeparableConv2D(filters=64,
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filters=64,
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 4
    residual = Conv2D(filters=128,
                      kernel_size=(1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)
    x = SeparableConv2D(filters=128,
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(filters=128,
                        kernel_size=(3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    x = Conv2D(filters=num_classes, kernel_size=(3, 3), padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)
    return img_input, output
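
A hypothetical way to wire up define_model above, using the 48x48 input size its comments assume and the standard keras l2 weight regularizer (the channel count and class count here are illustrative):

from keras.models import Model
from keras.regularizers import l2

img_input, output = define_model(input_shape=(48, 48, 1),
                                 num_classes=7,
                                 regularization=l2(0.01))
model = Model(img_input, output)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])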
Example #23
0
def Xception(input_shape=None, classes=1000):
    img_input = Input(shape=input_shape)

    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False,
               name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(728, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block4_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = SeparableConv2D(728, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv3')(x)
        x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = Conv2D(1024, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    x = Activation('relu', name='block13_sepconv2_act')(x)
    x = SeparableConv2D(1024, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block13_sepconv2')(x)
    x = BatchNormalization(name='block13_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block13_pool')(x)
    x = layers.add([x, residual])

    x = SeparableConv2D(1536, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)

    model = Model(img_input, x, name='xception')

    return model
Example #24
0
from .neural_layers import expand_dims

def create_network(input_shape, prior, L1=0, L2=0, dropout=0, KERNEL_NUM=64):
    resnet = ResNet50(include_top=False, input_shape=input_shape)
    stage_names = ['activation_10', 'activation_22', 'activation_40', 'activation_49']
    stages = [resnet.get_layer(stage_name).output for stage_name in stage_names]

    feature_model = Model(inputs=resnet.input, outputs=stages)
    for l in feature_model.layers:
        l.trainable = False

    # Feature Pyramid Network
    inp = Input(input_shape)
    features_at_stages = feature_model(inp)

    x = SeparableConv2D(KERNEL_NUM, 1, padding='same', kernel_regularizer=l1_l2(l1=L1, l2=L2))(features_at_stages[-1])
    x = BatchNormalization()(x)
    P = LeakyReLU(name='P5')(x)
    pyramid_features = [P]

    for i in range(2, 5):
        x = SeparableConv2D(KERNEL_NUM, 1, padding='same', kernel_regularizer=l1_l2(l1=L1, l2=L2))(features_at_stages[-i])
        x = BatchNormalization()(x)
        x = LeakyReLU()(x)

        y = UpSampling2D(interpolation='bilinear')(P)
        z = Add()([x, y])

        z = SeparableConv2D(KERNEL_NUM, 3, padding='same', kernel_regularizer=l1_l2(l1=L1, l2=L2))(z)
        z = BatchNormalization()(z)
        P = LeakyReLU(name='P{}'.format(6 - i))(z)
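        # The original snippet ends here, mid-function. A hedged guess at the
        # continuation (not in the source): collect each level so P2..P5 are
        # all exposed, then wrap them in a Model.
        pyramid_features.append(P)

    return Model(inputs=inp, outputs=pyramid_features, name='fpn')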
def get_test_model_full():
    """Returns a maximally complex test model,
    using all supported layer types with different parameter combination.
    """
    input_shapes = [
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4, ),
        (2, 3),
        (27, 29, 1),
        (17, 1),
        (17, 4),
    ]
    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    for inp in inputs[6:8]:
        for padding in ['valid', 'same']:
            for s in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv1D(out_channels,
                                   s,
                                   padding=padding,
                                   dilation_rate=d)(inp))
        for padding_size in range(0, 5):
            outputs.append(ZeroPadding1D(padding_size)(inp))
        for crop_left in range(0, 2):
            for crop_right in range(0, 2):
                outputs.append(Cropping1D((crop_left, crop_right))(inp))
        for upsampling_factor in range(1, 5):
            outputs.append(UpSampling1D(upsampling_factor)(inp))
        for padding in ['valid', 'same']:
            for pool_factor in range(1, 6):
                for s in range(1, 4):
                    outputs.append(
                        MaxPooling1D(pool_factor, strides=s,
                                     padding=padding)(inp))
                    outputs.append(
                        AveragePooling1D(pool_factor,
                                         strides=s,
                                         padding=padding)(inp))
        outputs.append(GlobalMaxPooling1D()(inp))
        outputs.append(GlobalAveragePooling1D()(inp))

    for inp in [inputs[0], inputs[5]]:
        for padding in ['valid', 'same']:
            for h in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   padding=padding,
                                   dilation_rate=(d, 1))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            padding=padding,
                                            dilation_rate=(d, 1))(inp))
                    for sy in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   strides=(1, sy),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            strides=(sy, sy),
                                            padding=padding)(inp))
                for sy in range(1, 4):
                    outputs.append(
                        MaxPooling2D((h, 1), strides=(1, sy),
                                     padding=padding)(inp))
            for w in range(1, 6):
                for out_channels in [1, 2]:
                    # note: `sy` still holds its final value (3) from the
                    # loop above, so this conditional reduces to [1]
                    for d in (range(1, 4) if sy == 1 else [1]):
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   padding=padding,
                                   dilation_rate=(1, d))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            padding=padding,
                                            dilation_rate=(1, d))(inp))
                    for sx in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   strides=(sx, 1),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            strides=(sx, sx),
                                            padding=padding)(inp))
                for sx in range(1, 4):
                    outputs.append(
                        MaxPooling2D((1, w), strides=(1, sx),
                                     padding=padding)(inp))
    outputs.append(ZeroPadding2D(2)(inputs[0]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[0]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[0]))
    outputs.append(Cropping2D(2)(inputs[0]))
    outputs.append(Cropping2D((2, 3))(inputs[0]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[0]))
    for y in range(1, 3):
        for x in range(1, 3):
            outputs.append(UpSampling2D(size=(y, x))(inputs[0]))
    outputs.append(GlobalAveragePooling2D()(inputs[0]))
    outputs.append(GlobalMaxPooling2D()(inputs[0]))
    outputs.append(AveragePooling2D((2, 2))(inputs[0]))
    outputs.append(MaxPooling2D((2, 2))(inputs[0]))
    outputs.append(UpSampling2D((2, 2))(inputs[0]))
    outputs.append(keras.layers.concatenate([inputs[0], inputs[0]]))
    outputs.append(Dropout(0.5)(inputs[0]))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(center=False)(inputs[0]))
    outputs.append(BatchNormalization(scale=False)(inputs[0]))

    outputs.append(Conv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(Conv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=False)(inputs[0]))

    outputs.append(Dense(2, use_bias=True)(inputs[3]))
    outputs.append(Dense(2, use_bias=False)(inputs[3]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[1]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[2]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[2]))  # (1, 8, 8)
    x = keras.layers.concatenate([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = keras.layers.concatenate(
        [MaxPooling2D((2, 2))(x),
         AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[3]),
        Activation('hard_sigmoid')(inputs[3]),
        Activation('selu')(inputs[3]),
        Activation('sigmoid')(inputs[3]),
        Activation('softplus')(inputs[3]),
        Activation('softmax')(inputs[3]),
        Activation('relu')(inputs[3]),
        LeakyReLU()(inputs[3]),
        ELU()(inputs[3]),
        shared_activation(inputs[3]),
        inputs[4],
        inputs[1],
        x,
        shared_activation(x),
    ]

    print('Model has {} outputs.'.format(len(outputs)))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_full')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    batch_size = 1
    epochs = 10
    data_in = generate_input_data(training_data_size, input_shapes)
    data_out = generate_output_data(training_data_size, outputs)
    model.fit(data_in, data_out, epochs=epochs, batch_size=batch_size)
    return model
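
The helpers generate_input_data and generate_output_data are not shown in
this listing; a minimal sketch consistent with how they are called above
(the implementations are assumptions, not the original code):

import numpy as np
from keras import backend as K

def generate_input_data(training_data_size, input_shapes):
    # one random array per model input; prepend the batch dimension
    return [np.random.normal(size=(training_data_size,) + tuple(shape))
            for shape in input_shapes]

def generate_output_data(training_data_size, outputs):
    # match each output tensor's static shape, dropping the batch axis
    return [np.random.normal(size=(training_data_size,)
                             + tuple(K.int_shape(output)[1:]))
            for output in outputs]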
    def Squeezefacenet(self):

        input_img = Input(shape=self.input_shape)

        #y = Input(shape=(self.classes, ))
        #stage 0
        x = self.conv_block(input_img, 48, 1)
        x = self.conv_block(x, 96, 2, strides=(2, 2))

        #stage 1
        x1 = self.fire_squeeze(x, 16, 1)
        x1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                          name='maxpool1')(x1)

        #stage 2
        x2 = self.fire_squeeze(x1, 32, 2)

        #stage 3
        x3 = self.fire_squeeze(x2, 32, 3)
        merge2_3 = add([x2, x3], name="add2_3")

        #stage 4
        x4 = self.fire_squeeze(merge2_3, 32, 4)
        x4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                          name='maxpool4')(x4)

        #stage 5
        x5 = self.fire_squeeze(x4, 32, 5)
        merge4_5 = add([x4, x5], name="add4_5")

        #stage 6
        x6 = self.fire_squeeze(merge4_5, 32, 6)

        #stage 7
        x7 = self.fire_squeeze(x6, 32, 7)
        merge6_7 = add([x6, x7], name="add6_7")

        #stage 8
        x8 = self.fire_squeeze(merge6_7, 32, 8)
        x8 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                          name='maxpool8')(x8)

        #stage 9
        x9 = self.fire_squeeze(x8, 32, 9)
        merge8_9 = add([x8, x9], name="add8_9")

        #GDC
        x10 = SeparableConv2D(256, (6, 6),
                              strides=(1, 1),
                              depth_multiplier=1,
                              activation=None,
                              depthwise_initializer='glorot_uniform',
                              pointwise_initializer='glorot_uniform',
                              name='s_conv')(merge8_9)
        x10 = BatchNormalization(epsilon=1e-5, momentum=0.9,
                                 name='sconv_bn')(x10)
        #x10 = PReLU('zero', name='sconv_prelu')(x10)
        #x10 = Activation('relu', name='sconv_relu')(x10)
        #x10 = self.conv_block(x10, 128, 3, kernel=(1, 1))
        x10 = Conv2D(128, (1, 1),
                     strides=1,
                     padding='same',
                     activation=None,
                     kernel_initializer='glorot_uniform',
                     bias_initializer='zeros',
                     kernel_regularizer=l2(weight_decay),
                     name='conv_out')(x10)
        x10 = BatchNormalization(epsilon=1e-5, momentum=0.9,
                                 name='conv10_bn')(x10)
        #out
        #out_drop = Dropout(0.5, name='dropout')(x10)
        out_flatten = Flatten(name='flatten')(x10)

        #out = ArcFace(self.classes, regularizer=l2(weight_decay))([out_flatten, y])
        out = Dense(self.classes,
                    kernel_initializer='glorot_uniform',
                    name='dense')(out_flatten)

        self.model = Model(inputs=input_img, outputs=out)
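
    # conv_block and fire_squeeze are defined elsewhere in this class; a
    # hedged sketch consistent with the call sites above (the 4x expand
    # ratio and the layer names are assumptions, not the original code):
    def conv_block(self, x, filters, block_id, strides=(1, 1), kernel=(3, 3)):
        # assumed conv -> BN -> ReLU block
        x = Conv2D(filters, kernel, strides=strides, padding='same',
                   use_bias=False, name='conv{}'.format(block_id))(x)
        x = BatchNormalization(name='conv{}_bn'.format(block_id))(x)
        return Activation('relu', name='conv{}_relu'.format(block_id))(x)

    def fire_squeeze(self, x, squeeze_filters, fire_id):
        # assumed SqueezeNet-style fire module: 1x1 squeeze, then parallel
        # 1x1 and 3x3 expands concatenated on the channel axis
        s = Conv2D(squeeze_filters, (1, 1), activation='relu', padding='same',
                   name='fire{}_squeeze'.format(fire_id))(x)
        e1 = Conv2D(squeeze_filters * 4, (1, 1), activation='relu',
                    padding='same', name='fire{}_expand1'.format(fire_id))(s)
        e3 = Conv2D(squeeze_filters * 4, (3, 3), activation='relu',
                    padding='same', name='fire{}_expand3'.format(fire_id))(s)
        return concatenate([e1, e3], name='fire{}_concat'.format(fire_id))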
Example #27
0
 def _convBlock(self, x, num_filters, activation, kernel_size=(3,3)):
     x = SeparableConv2D(num_filters,kernel_size,padding='same')(x)
     x = BatchNormalization(axis=-1)(x)
     x = Activation(activation)(x)
     return x
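
A hedged call for reference (arguments illustrative, not from the source):

 x = self._convBlock(x, num_filters=64, activation='relu')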
Example #28
0
def miniXception_loader(input_tensor=None,
                        input_shape=None,
                        weights=None,
                        output_size=1024,
                        return_model=False,
                        pooling=None,
                        normalize=False,
                        binary=False,
                        regularize=None):
    """
    Instantiates either a Tensor or a Model for the Core Embedding Network
    Optionally loads weights pre-trained
    This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    data format `(width, height, channels)`.
    Min input image size for this model is 128x128
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`.
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 71.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """

    if K.backend() != 'tensorflow':
        raise RuntimeError('The model is only available with '
                           'the TensorFlow backend.')
    if K.image_data_format() != 'channels_last':
        warnings.warn(
            'The model is only available for the '
            'input data format "channels_last" '
            '(height, width, channels). '
            'However your settings specify the default '
            'data format "channels_first" (channels, height, width). '
            'You should set `image_data_format="channels_last"` in your Keras '
            'config located at ~/.keras/keras.json. '
            'The model being returned right now will expect inputs '
            'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    # Determine proper input shape
    '''
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=71,
                                      data_format=K.image_data_format(),
                                      include_top=include_top)
    '''
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False,
               name='block1_conv1')(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)

    x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Dropout(rate=0.1)(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv1')(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block2_sepconv2')(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Dropout(rate=0.1)(x)
    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv1')(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block3_sepconv2')(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Dropout(rate=0.1)(x)
    x = Activation('relu', name='block4_sepconv1_act')(x)

    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv1')(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3),
                        padding='same',
                        use_bias=False,
                        name='block4_sepconv2')(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3),
                     strides=(2, 2),
                     padding='same',
                     name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(1):
        residual = x
        prefix = 'block' + str(i + 5)

        x = Dropout(rate=0.1)(x)
        x = Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = SeparableConv2D(256, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv1')(x)
        x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
        x = Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = SeparableConv2D(256, (3, 3),
                            padding='same',
                            use_bias=False,
                            name=prefix + '_sepconv2')(x)
        x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
        #x = Activation('relu', name=prefix + '_sepconv3_act')(x)
        #x = SeparableConv2D(512, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
        #x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])
    '''
    residual = Conv2D(1024, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block13_sepconv1_act')(x)
    x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
    x = BatchNormalization(name='block13_sepconv1_bn')(x)
    '''

    x = Activation('relu', name='block13_sepconv2_act')(x)
    #x = SeparableConv2D(output_size, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
    # SeparableConv2D doesn't allow decreasing the feature size (fixed in newer versions of TensorFlow)
    x = Conv2D(output_size, (1, 1),
               strides=(1, 1),
               use_bias=False,
               name='block13_conv')(x)
    x = BatchNormalization(name='pre_embed')(x)
    '''
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
    x = layers.add([x, residual])

    x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
    x = BatchNormalization(name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
    x = BatchNormalization(name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)
    '''
    if pooling == 'avg':
        x = GlobalAveragePooling2D(name='embeding')(x)
    elif pooling == 'max':
        x = GlobalMaxPooling2D(name='embeding')(x)
    elif pooling == 'rmac':
        # we have x16 reduction, so 128*128 input was reduced to 8*8
        x = MaxPooling2D((3, 3),
                         strides=(2, 2),
                         padding='valid',
                         name='embed_pool')(x)
        x = GlobalAveragePooling2D(name='embeding')(x)
    elif pooling == 'msrmac':
        s1 = GlobalAveragePooling2D(name='s1')(x)
        s2 = MaxPooling2D((2, 2), strides=(1, 1), padding='valid')(x)
        s2 = GlobalAveragePooling2D(name='s2')(s2)
        s4 = MaxPooling2D((4, 4), strides=(2, 2), padding='valid')(x)
        s4 = GlobalAveragePooling2D(name='s4')(s4)
        s8 = GlobalMaxPooling2D(name='s8')(x)
        x = layers.add([s1, s2, s4, s8], name='embeding')
    elif pooling == 'conv':
        x = Conv2D(output_size, (8, 8),
                   strides=(1, 1),
                   use_bias=False,
                   name='embed_conv')(x)
        x = Flatten(name='embeding')(x)
    else:
        x = Flatten(name='embeding')(x)

    if binary:
        x = Activation('sigmoid')(x)

    if normalize:
        x = Lambda(lambda q: K.l2_normalize(q, axis=-1), name='n_embedding')(x)

    if regularize is not None:
        #x = Activation('relu', name='prereg_act')(x)
        x = ActivityRegularization(l1=regularize)(x)

    # load weights (a plain tensor has no load_weights method, so wrap the
    # shared-layer graph in a temporary Model before loading)
    if weights is not None:
        Model(img_input, x).load_weights('Weights/' + weights)
        print('Loaded {}'.format(weights))

    if old_data_format:
        K.set_image_data_format(old_data_format)

    if return_model:
        return Model(img_input, x)
    else:
        return x
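
A hedged usage sketch for miniXception_loader (the input shape and options
are illustrative, not from the original):

embed = miniXception_loader(input_shape=(128, 128, 3),
                            output_size=1024,
                            pooling='rmac',
                            normalize=True,
                            return_model=True)
embed.summary()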
Example #29
0
def get_model2():
    try:
        model = load_model('./model0.h5')
    except IOError:
        nb_filter = 24
        growth_rate = 24
        compression = 0.5
        weight_decay = 1e-4
        global shape_d
        img_shape = shape_d

        inputs = Input([img_shape, img_shape, 31])
        x = inputs
        x = SeparableConv2D(nb_filter,
                            3,
                            kernel_initializer='he_uniform',
                            padding='same',
                            strides=[2, 2])(x)

        for i in range(1):
            x = res_block(x, nb_filter)
        x_256, nb_filter_256 = x, nb_filter
        nb_filter *= 2
        x = Conv2D(nb_filter,
                   3,
                   kernel_initializer='he_uniform',
                   padding='same',
                   strides=[2, 2])(x)

        for i in range(2):
            x = res_block(x, nb_filter)
        x_128, nb_filter_128 = x, nb_filter
        nb_filter *= 2
        x = Conv2D(nb_filter,
                   3,
                   kernel_initializer='he_uniform',
                   padding='same',
                   strides=[2, 2])(x)

        for i in range(2):
            x = res_block(x, nb_filter)
        x_64, nb_filter_64 = x, nb_filter
        nb_filter *= 2
        x = Conv2D(nb_filter,
                   3,
                   kernel_initializer='he_uniform',
                   padding='same',
                   strides=[2, 2])(x)

        for i in range(4):
            x = res_block(x, nb_filter)

        x, nb_filter = res_up(x, nb_filter, x_64, nb_filter_64)
        for i in range(2):
            x = res_block(x, nb_filter)

        x, nb_filter = res_up(x, nb_filter, x_128, nb_filter_128)
        for i in range(2):
            x = res_block(x, nb_filter)

        x, nb_filter = res_up(x, nb_filter, x_256, nb_filter_256)
        for i in range(1):
            x = res_block(x, nb_filter)

        x = Conv2DTranspose(nb_filter,
                            3,
                            kernel_initializer='he_uniform',
                            padding='same',
                            strides=[2, 2])(x)
        # x = BatchNormalization(axis=-1, gamma_regularizer=regularizers.l2(weight_decay),beta_regularizer=regularizers.l2(weight_decay))(x)
        x = Activation('relu')(x)

        x = Conv2D(
            int(nb_filter * 0.5),
            3,
            kernel_initializer='he_uniform',
            padding='same',
        )(x)
        # x = Conv2D(int(nb_filter*0.5), (3,3), kernel_initializer='he_uniform', padding='same', use_bias=False,kernel_regularizer=regularizers.l2(weight_decay))(x)
        # x = BatchNormalization(axis=-1, gamma_regularizer=regularizers.l2(weight_decay),beta_regularizer=regularizers.l2(weight_decay))(x)
        x = Activation('relu')(x)

        x = Conv2D(
            int(nb_filter * 0.5),
            3,
            kernel_initializer='he_uniform',
            padding='same',
        )(x)
        # x = Conv2D(int(nb_filter*0.5), (3,3), kernel_initializer='he_uniform', padding='same', use_bias=False,kernel_regularizer=regularizers.l2(weight_decay))(x)
        # x = BatchNormalization(axis=-1, gamma_regularizer=regularizers.l2(weight_decay),beta_regularizer=regularizers.l2(weight_decay))(x)
        x = Activation('relu')(x)

        x1 = Conv2D(1,
                    3,
                    kernel_initializer='he_uniform',
                    padding='same',
                    name='1',
                    activation='relu')(x)
        x2 = Conv2D(1,
                    3,
                    kernel_initializer='he_uniform',
                    padding='same',
                    name='2',
                    activation='relu')(x)
        x3 = Conv2D(1,
                    3,
                    kernel_initializer='he_uniform',
                    padding='same',
                    name='3',
                    activation='relu')(x)
        x4 = Conv2D(1,
                    3,
                    kernel_initializer='he_uniform',
                    padding='same',
                    name='4',
                    activation='relu')(x)
        x5 = Conv2D(1,
                    3,
                    kernel_initializer='he_uniform',
                    padding='same',
                    name='5',
                    activation='relu')(x)
        x6 = Conv2D(1,
                    3,
                    kernel_initializer='he_uniform',
                    padding='same',
                    name='6',
                    activation='relu')(x)

        model = Model(inputs, [x1, x2, x3, x4, x5, x6])
    return model
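
res_block and res_up are not defined in this snippet; a minimal sketch
consistent with their call sites (names and internal structure are
assumptions, not the original code):

from keras.layers import Activation, Conv2D, Conv2DTranspose, add, concatenate

def res_block(x, nb_filter):
    # assumed residual block: two 3x3 convs with an identity shortcut
    shortcut = x
    y = Conv2D(nb_filter, 3, kernel_initializer='he_uniform',
               padding='same', activation='relu')(x)
    y = Conv2D(nb_filter, 3, kernel_initializer='he_uniform',
               padding='same')(y)
    return Activation('relu')(add([y, shortcut]))

def res_up(x, nb_filter, skip, skip_filters):
    # assumed decoder step: upsample 2x, merge the encoder skip, then
    # project down to the skip path's channel count
    x = Conv2DTranspose(nb_filter, 3, kernel_initializer='he_uniform',
                        padding='same', strides=[2, 2])(x)
    x = concatenate([x, skip])
    x = Conv2D(skip_filters, 1, kernel_initializer='he_uniform',
               padding='same', activation='relu')(x)
    return x, skip_filters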
Example #30
0
# maxim = np.max(test_values)
# minim = np.min(test_values)
# test_values = (test_values - minim)/(maxim - minim + 1.0)
# print test_data.shape
# print test_values.shape

# model = Sequential()
# model.add(Dense(128, activation="relu", kernel_initializer="uniform", input_dim=m*n*3))
# model.add(Dense(32, activation="relu", kernel_initializer="uniform"))
# model.add(Dense(1, kernel_initializer='normal'))
# model.add(Activation("softmax"))

model = Sequential()

# model.add(Convolution2D(32, 3, strides=1, activation='relu', input_shape=(n, m, 3)))
model.add(SeparableConv2D(32, 3, strides=1, padding='same', depth_multiplier=1, activation='relu', input_shape=(n, m, 3)))
model.add(SeparableConv2D(32, 3, strides=1, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='linear'))  # softmax over a single unit is constant 1.0; use a linear head with MSE

# train the model using SGD
# print("[INFO] compiling model...")
sgd = SGD(lr=0.1)
# model.compile(optimizer=sgd, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.compile(optimizer=sgd, loss="mean_squared_error")
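
A hedged training call to round out the snippet (X_train and y_train are
hypothetical arrays of shape (num_samples, n, m, 3) and (num_samples, 1)):

model.fit(X_train, y_train, batch_size=32, epochs=10, validation_split=0.1)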