Example #1
def _inverted_res_block(inputs,
                        expansion,
                        stride,
                        alpha,
                        filters,
                        block_id,
                        skip_connection,
                        rate=1):
    in_channels = K.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand

        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same',
                        dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)

    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
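This block assumes two helpers that the snippet does not show: _make_divisible and relu6. A minimal sketch of the usual MobileNet-style definitions (an assumption about this project's code, not a verbatim copy):

from keras import backend as K


def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, but never drop more
    # than 10% below the original value (MobileNet convention).
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


def relu6(x):
    # ReLU capped at 6, the activation used throughout MobileNetV2.
    return K.relu(x, max_value=6)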
Example #2
def EEGNet2(nb_classes=2,
            Chans=64,
            Samples=64,
            regRate=0.001,
            dropoutRate=0.25,
            kernLength=64,
            numFilters=8):

    input_conv = Input(shape=(1, Chans, Samples))

    conv_block1 = Conv2D(numFilters, (1, kernLength),
                         padding='same',
                         kernel_regularizer=l1_l2(l1=0.0, l2=0.0),
                         input_shape=(1, Chans, Samples),
                         use_bias=False)(input_conv)
    conv_block1 = BatchNormalization(axis=1)(conv_block1)
    conv_block1 = DepthwiseConv2D((Chans, 1),
                                  depthwise_regularizer=l1_l2(l1=regRate,
                                                              l2=regRate),
                                  use_bias=False)(conv_block1)
    conv_block1 = BatchNormalization(axis=1)(conv_block1)
    conv_block1 = Activation('elu')(conv_block1)
    conv_block1 = SpatialDropout2D(dropoutRate)(conv_block1)

    conv_block2 = SeparableConv2D(numFilters, (1, 8),
                                  depthwise_regularizer=l1_l2(l1=0.0,
                                                              l2=regRate),
                                  use_bias=False,
                                  padding='same')(conv_block1)
    conv_block2 = BatchNormalization(axis=1)(conv_block2)
    conv_block2 = Activation('elu', name='elu_2')(conv_block2)
    conv_block2 = AveragePooling2D((1, 4))(conv_block2)
    conv_block2 = SpatialDropout2D(dropoutRate, name='drop_2')(conv_block2)

    conv_block3 = SeparableConv2D(numFilters * 2, (1, 8),
                                  depth_multiplier=2,
                                  depthwise_regularizer=l1_l2(l1=0.0,
                                                              l2=regRate),
                                  use_bias=False,
                                  padding='same')(conv_block2)
    conv_block3 = BatchNormalization(axis=1)(conv_block3)
    conv_block3 = Activation('elu', name='elu_3')(conv_block3)
    conv_block3 = AveragePooling2D((1, 4))(conv_block3)
    conv_block3 = SpatialDropout2D(dropoutRate, name='drop_3')(conv_block3)

    flatten_layer = Flatten(name='flatten')(conv_block3)

    dense_layer = Dense(nb_classes, name='dense')(flatten_layer)
    out_put = Activation('softmax', name='softmax')(dense_layer)

    return Model(inputs=input_conv, outputs=out_put)
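A minimal usage sketch (the input layout (1, Chans, Samples) follows the Input layer above; the training settings here are assumptions, not part of the original snippet):

model = EEGNet2(nb_classes=2, Chans=64, Samples=128)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()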
Example #3
def SepConv_BN(x,
               filters,
               prefix,
               stride=1,
               kernel_size=3,
               rate=1,
               depth_activation=False,
               epsilon=1e-3):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
        Implements right "same" padding for even kernel sizes
        Args:
            x: input tensor
            filters: num of filters in pointwise convolution
            prefix: prefix before name
            stride: stride at depthwise conv
            kernel_size: kernel size for depthwise convolution
            rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
            epsilon: epsilon to use in BN layer
    """

    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size),
                        strides=(stride, stride),
                        dilation_rate=(rate, rate),
                        padding=depth_padding,
                        use_bias=False,
                        name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
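The manual padding branch reproduces 'same' behavior for strided atrous convolution: a k x k kernel dilated at rate r has effective size k + (k - 1)(r - 1), so for kernel_size=3 and rate=2 the effective kernel is 5 and pad_total = 4 is split 2/2 between the two sides. A usage sketch exercising that branch (the input shape is an assumption):

inp = Input(shape=(65, 65, 256))
# stride 2, rate 2 -> ZeroPadding2D((2, 2)) followed by 'valid' depthwise
out = SepConv_BN(inp, 256, 'aspp_sep', stride=2, rate=2,
                 depth_activation=True)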
Example #4
def MobileFaceNets(input_shape=(112, 112, 3), n_classes=10, k=128):
    """MobileFaceNets"""
    inputs = Input(shape=input_shape)  #112x112,(img-127.5)/255
    y = Input(shape=(n_classes, ))
    x = _conv_block(inputs, 64, (3, 3), strides=(2, 2))

    # depthwise conv3x3
    x = DepthwiseConv2D(3, strides=(1, 1), depth_multiplier=1,
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = PReLU(shared_axes=[1, 2])(x)  # PReLU shared over spatial axes
    #     x = Activation(relu)(x)

    # 5 stages of inverted-residual bottlenecks
    x = _inverted_residual_block(x, 64, (3, 3), t=2, strides=2, n=5)
    x = _inverted_residual_block(x, 128, (3, 3), t=4, strides=2, n=1)
    x = _inverted_residual_block(x, 128, (3, 3), t=2, strides=1, n=6)
    x = _inverted_residual_block(x, 128, (3, 3), t=4, strides=2, n=1)
    x = _inverted_residual_block(x, 128, (3, 3), t=2, strides=1, n=2)

    # conv1x1
    x = _conv_block(x, 512, (1, 1), strides=(1, 1))

    # linear GDConv7x7
    x = DepthwiseConv2D(7, strides=(1, 1), depth_multiplier=1,
                        padding='valid')(x)
    x = Dropout(0.3, name='Dropout')(x)

    x = Conv2D(k, (1, 1), padding='same')(x)
    x = Reshape((k, ))(x)
    # x is the embeddings, y is the class labels for the embeddings; output is ...
    output = ArcFace(n_classes=n_classes,
                     regularizer=regularizers.l2(weight_decay))([x, y])

    model = Model([inputs, y], output)
    #     plot_model(model, to_file='images/MobileNetv2.png', show_shapes=True)
    print(model.input, model.output)
    return model
Example #5
def mixconv(inputs, kernel_sizes, strides, padding):
    convs = []
    for kernel_size in kernel_sizes:
        convs.append(
            DepthwiseConv2D(kernel_size, strides=strides, padding=padding))

    if len(convs) == 1:
        return convs[0](inputs)
    filters = inputs.shape[-1].value
    splits = split_channels(filters, len(convs))
    x_splits = tf.split(inputs, splits, -1)
    x_outputs = [c(x) for x, c in zip(x_splits, convs)]
    x = tf.concat(x_outputs, -1)
    return x
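split_channels is referenced but not defined here; a minimal sketch consistent with how tf.split is called above (channels divided as evenly as possible, remainder to the first group — an assumption, not necessarily the project's exact helper):

def split_channels(total_filters, num_groups):
    # e.g. split_channels(32, 3) -> [12, 10, 10]
    split = [total_filters // num_groups for _ in range(num_groups)]
    split[0] += total_filters - sum(split)
    return split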
Example #6
def bn_dw_conv2D(x,
                 kernel_size,
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 use_bias=False):
    x = DepthwiseConv2D(kernel_size,
                        strides=strides,
                        depth_multiplier=1,
                        padding='same',
                        use_bias=use_bias,
                        dilation_rate=dilation_rate,
                        kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=-1, scale=False, epsilon=1e-3)(x)
    return Activation(pre_relu6)(x)
Example #7
def depthwise_sep_conv(input_tensor,
                       n_filters,
                       strides=(1, 1),
                       Dk=3,
                       alpha=1.0):
    x = DepthwiseConv2D(kernel_size=(Dk, Dk), strides=strides,
                        padding='same')(input_tensor)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Conv2D(int(n_filters * alpha), (1, 1), strides=1, use_bias=False)(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    return x
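The point of this factorization: a Dk x Dk standard convolution costs Dk^2 * Cin * Cout weights, while depthwise + 1x1 pointwise costs Dk^2 * Cin + Cin * Cout. A quick illustrative count (bias and BN parameters ignored):

Dk, Cin, Cout = 3, 64, 128
standard = Dk * Dk * Cin * Cout           # 73728
separable = Dk * Dk * Cin + Cin * Cout    # 8768, roughly 8x fewer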
Example #8
def _depthwise_conv_block(inputs, pointwise_conv_filters, strides=(1, 1)):
    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=1,
                        strides=strides)(inputs)
    x = Dropout(0.1)(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation(relu6)(x)
    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1))(x)
    x = BatchNormalization(axis=-1)(x)
    return Activation(relu6)(x)
Example #9
def xception_block(x, channels):
    ##separable conv1
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)

    ##separable conv2
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)

    ##separable conv3
    x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    #tmp_x = np.zeros((X_train.shape[0],X_train.shape[1],7))
    x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    return x
Example #10
def Spatial_model(model_input, cfg, nb_classes):
    dropoutRate = 0.5
    norm_rate = 0.25
    Chans = cfg.chans
    '''input1   = Input(shape = (1, Chans, Samples))'''

    block1 = Conv2D(8, (1, 5), padding='same', use_bias=False,
                    name='conv1__R')(model_input)
    block1 = BatchNormalization(axis=-1, name='BN1__R')(block1)

    block1 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=2,
                             depthwise_constraint=max_norm(1.),
                             name='conv2__R')(block1)
    block1 = BatchNormalization(axis=-1, name='BN2__R')(block1)
    # note: axis=1 also worked here previously
    block1 = Activation('elu', name='Activation2__R')(block1)
    block1 = AveragePooling2D((1, 4), name='mean2__R')(block1)
    block1 = Dropout(dropoutRate, name='drop2__R')(block1)

    block2 = SeparableConv2D(16, (1, 5),
                             use_bias=False,
                             padding='same',
                             name='conv3__R')(block1)
    block2 = BatchNormalization(axis=-1, name='BN3__R')(block2)
    block2 = Activation('elu', name='Activation3__R')(block2)

    block3 = SeparableConv2D(16, (1, 5),
                             use_bias=False,
                             padding='same',
                             name='conv4__R')(block2)
    block3 = BatchNormalization(axis=-1, name='BN4__R')(block3)
    block3 = Activation('elu', name='Activation4__R')(block3)

    print('block3.shape', block3.shape)
    block3 = Reshape((int(block3.shape[-2]), int(block3.shape[-1])),
                     name='reshape__R')(block3)

    l_lstm_sent = LSTM(32, return_sequences=True, name='lstm1__R')(block3)
    l_lstm_sent = LSTM(8, return_sequences=True, name='lstm2__R')(l_lstm_sent)

    flatten = Flatten(name='flatten__R')(l_lstm_sent)
    preds = Dense(nb_classes,
                  name='dense__R',
                  activation='softmax',
                  kernel_constraint=max_norm(norm_rate))(flatten)
    # preds = Dense(nb_classes, name='dense', activation='softmax')(flatten)

    return Model(inputs=model_input, outputs=preds)
Example #11
def Transfer_Proposed_Conv_R(model_input, cfg, nb_classes):
    dropoutRate = 0.5
    norm_rate = 0.25
    Chans = cfg.chans
    '''input1   = Input(shape = (1, Chans, Samples))'''
    block1 = Conv2D(8, (9, 1), use_bias=False,
                    name='conv1__R')(model_input)  # spatial
    print('block1.shape', block1.shape)
    # block1 = Conv2D(8, (1, 5), padding='same', use_bias=False,name='conv1__R')(model_input)
    block1 = BatchNormalization(axis=-1, name='BN1__R')(block1)

    block1 = DepthwiseConv2D((1, 20),
                             use_bias=False,
                             depth_multiplier=2,
                             depthwise_constraint=max_norm(1.),
                             name='conv2__R')(block1)
    print('block1.shape', block1.shape)
    block1 = BatchNormalization(axis=-1, name='BN2__R')(block1)
    # note: axis=1 also worked here previously
    block1 = Activation('elu', name='Activation2__R')(block1)
    block1 = AveragePooling2D((1, 4), name='mean2__R')(block1)
    block1 = Dropout(dropoutRate, name='drop2__R')(block1)

    block2 = SeparableConv2D(16, (1, 16),
                             use_bias=False,
                             padding='same',
                             name='conv3__R')(block1)
    block2 = BatchNormalization(axis=-1, name='BN3__R')(block2)
    block2 = Activation('elu', name='Activation3__R')(block2)

    # using an LSTM right after Dropout muddles the layer ordering;
    # AveragePooling2D may work instead (still to be checked)
    ''' block2 = AveragePooling2D((1, 4))(block2)# it's(1,8)before
    block2 = Dropout(dropoutRate)(block2)'''
    print(block2.shape)  # 12,45,16
    # block3 = Reshape((48, 16))(block2)
    block3 = Reshape((int(block2.shape[-2]), int(block2.shape[-1])),
                     name='reshape__R')(block2)

    l_lstm_sent = LSTM(32, return_sequences=True, name='lstm1__R')(block3)
    l_lstm_sent = LSTM(8, return_sequences=True, name='lstm2__R')(l_lstm_sent)

    flatten = Flatten(name='flatten__R')(l_lstm_sent)
    preds = Dense(nb_classes,
                  name='dense__R',
                  activation='softmax',
                  kernel_constraint=max_norm(norm_rate))(flatten)
    # preds = Dense(nb_classes, name='dense', activation='softmax')(flatten)

    return Model(inputs=model_input, outputs=preds)
Example #12
def DW_Conv_BN(x,
               kernel_size=3,
               strides=1,
               kernel_initializer=CONV_KERNEL_INITIALIZER,
               activation=swish):
    x = DepthwiseConv2D(kernel_size,
                        strides=strides,
                        padding='same',
                        depth_multiplier=1,
                        depthwise_initializer=kernel_initializer)(x)
    x = BatchNormalization()(x)
    if activation:
        x = Activation(activation)(x)
    return x
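swish and CONV_KERNEL_INITIALIZER come from the surrounding EfficientNet-style module; plausible definitions, offered as assumptions rather than the project's exact code:

from keras import backend as K


def swish(x):
    # x * sigmoid(x), the activation used by EfficientNet.
    return x * K.sigmoid(x)


# He-style fan-out initializer in Keras config form.
CONV_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {'scale': 2.0, 'mode': 'fan_out', 'distribution': 'normal'}
}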
Example #13
def layerSeparableConv(input_tensor):
    """

    :param input_tensor:
    :return:
    """
    x = DepthwiseConv2D(3, padding="same",
                        input_shape=input_tensor.shape)(input_tensor)
    x = BatchNormalization(axis=2)(x)
    x = Activation('relu')(x)
    x = Conv2D(128, kernel_size=1, padding="same")(x)
    x = BatchNormalization(axis=2)(x)
    x = Activation('relu')(x)
    return x
Example #14
def xception_downsample_block(x, channels, top_relu=False):
    ##separable conv1
    if top_relu:
        x = Activation("relu")(x)
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    ##separable conv2
    x = DepthwiseConv2D((3, 3), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    ##separable conv3
    x = DepthwiseConv2D((3, 3), strides=(2, 2), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding="same", use_bias=False)(x)
    x = BatchNormalization()(x)
    return x
Example #15
    def WaveletDecomposition(self, ch, ind=0, alpha=0.):
        wave = []
        for typ in ['HH', 'HL', 'LH', 'LL']:
            fil = self.filters(typ, 2, ch, 1)
            name = ''.join(['WaveletDecomp', typ, str(ind)])

            if typ in ['HH', 'HL', 'LH']:
                lay = DepthwiseConv2D(kernel_size=(2, 2),
                                      strides=(2, 2),
                                      depthwise_initializer=fil,
                                      name=name,
                                      trainable=False,
                                      use_bias=False,
                                      depth_multiplier=1,
                                      activity_regularizer=l1(alpha))
            else:
                lay = DepthwiseConv2D(kernel_size=(2, 2),
                                      strides=(2, 2),
                                      depthwise_initializer=fil,
                                      name=name,
                                      trainable=False,
                                      use_bias=False,
                                      depth_multiplier=1)

            wave.append(lay)

        def decomp(inp):

            out = inp
            output = []

            for lay in wave:
                output.append(lay(out))

            return output

        return decomp
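self.filters(typ, 2, ch, 1) presumably returns a fixed 2x2 wavelet kernel as a depthwise initializer; a sketch using the standard Haar analysis filters (purely an assumption about what filters returns):

import numpy as np

# 2x2 Haar analysis kernels, normalized by 1/2.
HAAR = {
    'LL': np.array([[1., 1.], [1., 1.]]) / 2.,
    'LH': np.array([[1., 1.], [-1., -1.]]) / 2.,
    'HL': np.array([[1., -1.], [1., -1.]]) / 2.,
    'HH': np.array([[1., -1.], [-1., 1.]]) / 2.,
}


def haar_initializer(typ, ch):
    # Depthwise kernel of shape (2, 2, ch, 1): the same filter for
    # every input channel, depth_multiplier 1.
    kernel = np.tile(HAAR[typ][:, :, None, None], (1, 1, ch, 1))
    return lambda shape, dtype=None: kernel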
Example #16
def aspp(x, input_shape, out_stride):
    # ASPP: parallel atrous branches with increasing dilation rates
    b0 = Conv2D(128, (1, 1), padding="same", use_bias=False)(x)
    b0 = BatchNormalization()(b0)
    b0 = Activation("relu")(b0)

    b1 = DepthwiseConv2D((3, 3), dilation_rate=(6, 6), padding="same", use_bias=False)(x)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)
    b1 = Conv2D(128, (1, 1), padding="same", use_bias=False)(b1)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)

    b2 = DepthwiseConv2D((3, 3), dilation_rate=(12, 12), padding="same", use_bias=False)(x)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)
    b2 = Conv2D(128, (1, 1), padding="same", use_bias=False)(b2)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)

    b3 = DepthwiseConv2D((3, 3), dilation_rate=(18, 18), padding="same", use_bias=False)(x)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)
    b3 = Conv2D(128, (1, 1), padding="same", use_bias=False)(b3)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)

    out_shape = int(input_shape[0] / out_stride)
    out_shape1 = int(input_shape[1] / out_stride)
    b4 = AveragePooling2D(pool_size=(out_shape, out_shape1))(x)
    b4 = Conv2D(128, (1, 1), padding="same", use_bias=False)(b4)
    b4 = BatchNormalization()(b4)
    b4 = Activation("relu")(b4)
    b4 = BilinearUpsampling((out_shape, out_shape1))(b4)

    x = Concatenate()([b4, b0, b1, b2, b3])
    return x
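BilinearUpsampling is a custom layer found in several DeepLabv3+ Keras ports; a minimal TF1-style stand-in built from Lambda (an assumption, not the original layer):

import tensorflow as tf
from keras.layers import Lambda


def BilinearUpsampling(size):
    # Resize feature maps back to size = (height, width).
    return Lambda(lambda t: tf.image.resize_bilinear(
        t, size, align_corners=True))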
Example #17
def layerInvertedResidual(input_tensor, expansion):
    """
    An attempt to implement an Inverted Residual Block using Keras layers such as DepthwiseConv2D.

    :param input_tensor:
    :param expansion: expand filters size
    :return:
    """

    x = DepthwiseConv2D(3,
                        padding="same",
                        depth_multiplier=expansion,
                        input_shape=input_tensor.shape)(input_tensor)
    x = BatchNormalization(axis=2)(x)
    x = Activation('relu')(x)
    x = Conv2D(3, kernel_size=1, padding="same")(x)
    x = BatchNormalization(axis=2)(x)
    x = Activation('relu')(x)
    added = Add()([input_tensor, x])
    '''
    Model: "model_2"
    __________________________________________________________________________________________________
    Layer (type)                    Output Shape         Param #     Connected to                     
    ==================================================================================================
    input_7 (InputLayer)            (None, 64, 64, 3)    0                                            
    __________________________________________________________________________________________________
    depthwise_conv2d_5 (DepthwiseCo (None, 64, 64, 18)   180         input_7[0][0]                    
    __________________________________________________________________________________________________
    batch_normalization_5 (BatchNor (None, 64, 64, 18)   256         depthwise_conv2d_5[0][0]         
    __________________________________________________________________________________________________
    activation_3 (Activation)       (None, 64, 64, 18)   0           batch_normalization_5[0][0]      
    __________________________________________________________________________________________________
    conv2d_10 (Conv2D)              (None, 64, 64, 3)    57          activation_3[0][0]               
    __________________________________________________________________________________________________
    batch_normalization_6 (BatchNor (None, 64, 64, 3)    256         conv2d_10[0][0]                  
    __________________________________________________________________________________________________
    activation_4 (Activation)       (None, 64, 64, 3)    0           batch_normalization_6[0][0]      
    __________________________________________________________________________________________________
    add_1 (Add)                     (None, 64, 64, 3)    0           input_7[0][0]                    
                                                                     activation_4[0][0]               
    ==================================================================================================
    Total params: 749
    Trainable params: 493
    Non-trainable params: 256
    __________________________________________________________________________________________________
    None
    '''

    return added
Example #18
def _bottleneck(inputs, filters, kernel, t, alpha, s, r=False, mode="relu"):
    """Bottleneck
    This function defines a basic bottleneck structure.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers,specifying the strides
            of the convolution along the width and height.Can be a single
            integer to specify the same value for all spatial dimensions.
        alpha: Float, width multiplier.
        r: Boolean, whether to use the residual connection.
        mode: String, activation to use: "relu" (relu6), "leaky" or "prelu".

    # Returns
        Output tensor.
    """

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    # Depth
    tchannel = K.int_shape(inputs)[channel_axis] * t
    # Width
    cchannel = int(filters * alpha)

    x = _conv_block(inputs, tchannel, 1, 1)

    x = DepthwiseConv2D(kernel_size=kernel,
                        strides=s,
                        depth_multiplier=1,
                        padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    if mode == "leaky":
        x = LeakyReLU(alpha=0.2)(x)
    elif mode == "prelu":
        x = PReLU(shared_axes=[1, 2])(x)
    else:
        x = Activation(relu6)(x)

    x = Conv2D(cchannel, kernel_size=1, strides=1, padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    # x = PReLU(shared_axes=[1, 2])(x)

    if r:
        x = Add()([x, inputs])

    return x
Example #19
    def model(self):
        shape = (4, 4, 12)
        input = Input(shape=shape)
        if "Convolution_3v3" in self.mode:
            if "back" == self.padding:
                x = ZeroPadding2D(padding=((0, 1), (0, 1)),
                                  data_format=None)(input)
                x = Conv2D(filters=4,
                           kernel_size=(3, 3),
                           padding="valid",
                           strides=(2, 2))(x)
            else:
                x = Conv2D(filters=24,
                           kernel_size=(3, 3),
                           padding=self.padding,
                           strides=(self.stride, self.stride))(input)

        elif "DepthwiseConv_3v3" in self.mode:
            if "back" == self.padding:
                x = ZeroPadding2D(padding=((0, 1), (0, 1)),
                                  data_format=None)(input)
                x = DepthwiseConv2D(kernel_size=(3, 3),
                                    padding="valid",
                                    strides=(2, 2))(x)
            else:
                x = DepthwiseConv2D(kernel_size=(3, 3),
                                    padding=self.padding,
                                    strides=(self.stride, self.stride))(input)
        elif "Convolution_1v1" in self.mode:
            x = Conv2D(filters=8,
                       kernel_size=(1, 1),
                       padding='same',
                       strides=(1, 1))(input)
#        activation='relu'
        model = Model(inputs=input, outputs=x)
        return model
Example #20
    def test_tiny_depthwise_conv_valid_pad_depth_multiplier(self):
        np.random.seed(1988)
        input_dim = 16
        input_shape = (input_dim, input_dim, 3)
        depth_multiplier = 2
        kernel_height = 3
        kernel_width = 3
        # Define a model
        model = Sequential()
        model.add(DepthwiseConv2D(depth_multiplier=depth_multiplier,
                                  kernel_size=(kernel_height, kernel_width),
                                  input_shape=input_shape,
                                  padding='valid',
                                  strides=(1, 1)))
        # Set some random weights
        model.set_weights(
            [np.random.rand(*w.shape) for w in model.get_weights()])
        # Test the keras model
        self._test_keras_model(model)
Example #21
    def convolution(self, input_, filters_, strides, depth_multiplier):

        if strides == (1, 1):
            x = input_
        else:
            x = ZeroPadding2D((1, 1))(input_)
        x = DepthwiseConv2D((3, 3),
                            strides=strides,
                            padding='same' if strides == (1, 1) else 'valid',
                            depth_multiplier=depth_multiplier)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2D(filters_, (1, 1))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

        return x
Example #22
def _depthwise_conv_block_f(inputs,
                            depth_multiplier=1,
                            strides=(1, 1),
                            block_id=1):
    channel_axis = -1
    x = ZeroPadding2D(padding=(1, 1), name='conv_pad_%d' % block_id)(inputs)
    x = DepthwiseConv2D((3, 3),
                        padding='valid',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id)(x)
    x = BatchNormalization(axis=channel_axis,
                           name='conv_dw_%d_bn' % block_id)(x)
    return Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
Example #23
def DW_Conv_BN(x,
               kernel_size=3,
               strides=1,
               padding='same',
               activation=swish,
               kernel_initializer=CONV_KERNEL_INITIALIZER):
    x = DepthwiseConv2D(kernel_size,
                        strides=strides,
                        padding=padding,
                        use_bias=False,
                        depthwise_initializer=kernel_initializer)(x)
    x = BatchNormalization(momentum=0.9)(x)
    if activation:
        x = Activation(activation)(x)
    return x
Example #24
def _bottleneck(inputs, filters, kernel, t, s, r=False):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t
    x = _conv_block(inputs, tchannel, (1, 1), (1, 1))
    x = DepthwiseConv2D(kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = ReLU(6.0)(x)
    x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    if r:
        x = add([x, inputs])
    return x
Example #25
def kernel_expectation_2d(x, kernel_size, axis, vmin=0., vmax=1., name=None):
    """Implements a 2D linear interpolation (x for axis=0 and y for axis=1)
    using a depthwise convolution (non trainable).

    # Arguments
        x: Input tensor (None, H, W, num_points)
        kernel_size: tuple (h, w)

    # Return
        Tensor (None, H-h+1, W-w+1, num_points)
    """
    assert K.ndim(x) == 4, 'Input tensor must have ndim 4 {}'.format(K.ndim(x))

    if 'global_sam_cnt' not in globals():
        global global_sam_cnt
        global_sam_cnt = 0

    if name is None:
        name = '_%d' % global_sam_cnt
        global_sam_cnt += 1
    name = name + '%dx%d' % kernel_size

    num_filters = K.int_shape(x)[-1]

    lins = np.expand_dims(linspace_2d(kernel_size[0],
                                      kernel_size[1],
                                      axis=axis,
                                      vmin=vmin,
                                      vmax=vmax),
                          axis=-1)

    if num_filters > 1:
        lins = np.tile(lins, (1, 1, num_filters))

    f = DepthwiseConv2D(kernel_size,
                        padding='valid',
                        depth_multiplier=1,
                        strides=1,
                        use_bias=False,
                        name=name)
    x = f(x)

    wx = f.get_weights()
    wx[0][:, :, :, 0] = lins
    f.set_weights(wx)
    f.trainable = False

    return x
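linspace_2d is assumed to build an (h, w) map that varies linearly from vmin to vmax along the chosen axis (x for axis=0, y for axis=1, per the docstring above); a sketch under that assumption:

import numpy as np


def linspace_2d(rows, cols, axis=0, vmin=0., vmax=1.):
    # axis=0: values vary along x (columns); axis=1: along y (rows).
    if axis == 0:
        return np.tile(np.linspace(vmin, vmax, cols)[None, :], (rows, 1))
    return np.tile(np.linspace(vmin, vmax, rows)[:, None], (1, cols))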
Example #26
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    in_channels = K.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same',
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])   # added with skip layer
    return x
Example #27
    def _bottleneck(self, inputs, filters, kernel, e, s, squeeze, nl):
        """Bottleneck
        This function defines a basic bottleneck structure.

        # Arguments
            inputs: Tensor, input tensor of conv layer.
            filters: Integer, the dimensionality of the output space.
            kernel: An integer or tuple/list of 2 integers, specifying the
                width and height of the 2D convolution window.
            e: Integer, number of expanded channels (an absolute count,
                not a multiplier of the input size).
            s: An integer or tuple/list of 2 integers,specifying the strides
                of the convolution along the width and height.Can be a single
                integer to specify the same value for all spatial dimensions.
            squeeze: Boolean, Whether to use the squeeze.
            nl: String, nonlinearity activation type.

        # Returns
            Output tensor.
        """

        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
        input_shape = K.int_shape(inputs)

        tchannel = int(e)
        cchannel = int(self.alpha * filters)

        r = s == 1 and input_shape[3] == filters

        x = self._conv_block(inputs, tchannel, (1, 1), (1, 1), nl)

        x = DepthwiseConv2D(kernel,
                            strides=(s, s),
                            depth_multiplier=1,
                            padding='same')(x)
        x = BatchNormalization(axis=channel_axis)(x)
        x = self._return_activation(x, nl)

        if squeeze:
            x = self._squeeze(x)

        x = Conv2D(cchannel, (1, 1), strides=(1, 1), padding='same')(x)
        x = BatchNormalization(axis=channel_axis)(x)

        if r:
            x = Add()([x, inputs])

        return x
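self._squeeze and self._return_activation are defined elsewhere in this class; a sketch of a typical MobileNetV3-style squeeze-and-excite block, offered as an assumption:

def _squeeze(self, inputs):
    # SE block: global pool -> two dense layers -> per-channel rescale.
    ch = int(inputs.shape[-1])
    se = GlobalAveragePooling2D()(inputs)
    se = Dense(ch, activation='relu')(se)
    se = Dense(ch, activation='hard_sigmoid')(se)
    se = Reshape((1, 1, ch))(se)
    return Multiply()([inputs, se])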
Example #28
    def block(inputs):

        if expand_ratio != 1:
            x = Conv2D(filters,
                       kernel_size=[1, 1],
                       strides=[1, 1],
                       padding='same',
                       use_bias=False)(inputs)
            x = BatchNormalization(axis=channel_axis,
                                   momentum=batch_norm_momentum,
                                   epsilon=batch_norm_epsilon)(x)
            x = Swish()(x)
        else:
            x = inputs

        x = DepthwiseConv2D([kernel_size, kernel_size],
                            strides=strides,
                            padding='same',
                            use_bias=False)(x)
        x = BatchNormalization(axis=channel_axis,
                               momentum=batch_norm_momentum,
                               epsilon=batch_norm_epsilon)(x)
        x = Swish()(x)

        if has_se:
            x = SEBlock(input_filters, se_ratio, expand_ratio, data_format)(x)

        # output phase

        x = Conv2D(output_filters,
                   kernel_size=[1, 1],
                   strides=[1, 1],
                   padding='same',
                   use_bias=False)(x)
        x = BatchNormalization(axis=channel_axis,
                               momentum=batch_norm_momentum,
                               epsilon=batch_norm_epsilon)(x)

        if id_skip:
            # if all(s == 1 for s in strides) and (
            #         input_filters == output_filters):
            #
            #     # only apply drop_connect if skip presents.
            #     if drop_connect_rate:
            #         x = DropConnect(drop_connect_rate)(x)
            #     x = Add()([x, inputs])
            pass
        return x
Example #29
def layerInvertedResidual(input_tensor, expansion):
    """

    :param input_tensor:
    :param expansion: expand filters size
    :return:
    """
    x = DepthwiseConv2D(3,
                        padding="same",
                        depth_multiplier=expansion,
                        input_shape=input_tensor.shape)(input_tensor)
    x = BatchNormalization(axis=2)(x)
    x = Activation('relu')(x)
    x = Conv2D(3, kernel_size=1, padding="same")(x)
    x = BatchNormalization(axis=2)(x)
    x = Activation('relu')(x)
    added = Add()([input_tensor, x])

    return added
Example #30
def kernel_sum(x, kernel_size, strides=1, padding='valid', name=None):

    f = DepthwiseConv2D(kernel_size,
                        padding=padding,
                        depth_multiplier=1,
                        strides=strides,
                        use_bias=False,
                        name=name)

    x = f(x)
    w = f.get_weights()
    w[0][:] = 1.
    f.set_weights(w)
    f.trainable = False

    return x
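Because every weight is fixed to 1 and the layer is frozen, this computes a per-channel sliding-window sum. A quick sanity check (shapes are assumptions):

import numpy as np
from keras.layers import Input
from keras.models import Model

inp = Input(shape=(8, 8, 1))
out = kernel_sum(inp, kernel_size=3)
m = Model(inp, out)
# On an all-ones input, every 3x3 window sums to 9.
print(m.predict(np.ones((1, 8, 8, 1))))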