Example No. 1
def _inverted_res_block(inputs,
                        expansion,
                        stride,
                        alpha,
                        filters,
                        block_id,
                        skip_connection,
                        rate=1):
    in_channels = K.int_shape(inputs)[-1]  # was inputs._keras_shape[-1], removed in modern Keras
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same',
                        dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    # relu(x, max_value=6.) can't be passed straight to Activation, so wrap it in a Lambda:
    x = Lambda(lambda x: relu(x, max_value=6.),
               name=prefix + 'depthwise_relu')(x)

    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
Example No. 2
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    channel_axis = -1
    in_channels = K.int_shape(inputs)[channel_axis]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand

        # use bias
        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=True,
                   activation=None,
                   name=prefix + 'expand_Q')(x)
        x = BatchNormalization(axis=channel_axis,
                               epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN_Q')(x)
        x = ReLU(6., name=prefix + 'expand_relu_Q')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    if stride == 2:
        correct_pad = ((0, 1), (0, 1))
        x = ZeroPadding2D(padding=correct_pad,
                          name=prefix + 'pad_Q')(x)
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=True,
                        padding='same' if stride == 1 else 'valid',
                        name=prefix + 'depthwise_Q')(x)
    x = BatchNormalization(axis=channel_axis,
                           epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN_Q')(x)

    x = ReLU(6., name=prefix + 'depthwise_relu_Q')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=True,
               activation=None,
               name=prefix + 'project_Q')(x)
    x = BatchNormalization(axis=channel_axis,
                           epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN_Q')(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add_Q')([inputs, x])
    return x
Example No. 3
def mobilenet_3d(input_shape: tuple, model_2d: Model):
    input_image = Input(input_shape)
    x = ZeroPadding3D()(input_image)
    x = conv2d3d(model_2d.get_layer('conv1'))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = ZeroPadding3D()(x)
    x = DepthwiseConv2D(kernel_size=3)(x)  # kernel_size is required; 3x3 assumed (MobileNet's depthwise convs are 3x3)
Example No. 4
 def __global_depthwise_block(self, _inputs):
     assert K.int_shape(_inputs)[1] == K.int_shape(_inputs)[2]  # feature map must be square
     kernel_size = K.int_shape(_inputs)[1]
     x = DepthwiseConv2D((kernel_size, kernel_size),
                         strides=(1, 1),
                         depth_multiplier=1,
                         padding='same')(_inputs)
     return x
Example No. 5
def _depthwise_conv_block(inputs, pointwise_conv_filters, strides=(1, 1)):
    x = DepthwiseConv2D((3, 3), padding='same', depth_multiplier=1, strides=strides)(inputs)
    x = Dropout(0.1)(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation(relu6)(x)
    x = Conv2D(pointwise_conv_filters, (1, 1), padding='same', use_bias=False, strides=(1, 1))(x)
    x = BatchNormalization(axis=-1)(x)
    return Activation(relu6)(x)
Example No. 6
def aspp(x, input_shape, out_stride):
    # dilation rates: 6, 12, 12 (b2 and b3 both use 12)
    b0 = Conv2D(128, (1, 1), padding="same", use_bias=False)(x)
    b0 = BatchNormalization()(b0)
    b0 = Activation("relu")(b0)

    b1 = DepthwiseConv2D((3, 3),
                         dilation_rate=(6, 6),
                         padding="same",
                         use_bias=False)(x)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)
    b1 = Conv2D(128, (1, 1), padding="same", use_bias=False)(b1)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)

    b2 = DepthwiseConv2D((3, 3),
                         dilation_rate=(12, 12),
                         padding="same",
                         use_bias=False)(x)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)
    b2 = Conv2D(128, (1, 1), padding="same", use_bias=False)(b2)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)

    b3 = DepthwiseConv2D((3, 3),
                         dilation_rate=(12, 12),
                         padding="same",
                         use_bias=False)(x)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)
    b3 = Conv2D(128, (1, 1), padding="same", use_bias=False)(b3)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)

    out_shape = int(input_shape[0] / out_stride)
    out_shape1 = int(input_shape[1] / out_stride)
    b4 = AveragePooling2D(pool_size=(out_shape, out_shape1))(x)
    b4 = Conv2D(128, (1, 1), padding="same", use_bias=False)(b4)
    b4 = BatchNormalization()(b4)
    b4 = Activation("relu")(b4)
    b4 = BilinearUpsampling((out_shape, out_shape1))(b4)

    x = Concatenate()([b4, b0, b1, b2, b3])
    return x
Example No. 7
    def _layer(inp):
        gaussian_layer = DepthwiseConv2D(k, use_bias=False, padding='same', name='gaussian_blur_block')
        output = gaussian_layer(inp)
        # print(weights.shape, gaussian_layer.get_weights()[0].shape)
        gaussian_layer.set_weights([_kernel()])
        gaussian_layer.trainable = False

        return output
Example No. 8
def add_m_inception(inp, n, m, use_tiny=True, merge='add'):
    outs = []
    outs.append(Conv2D(n, (1, 1), padding='same', data_format='channels_last', activation='relu')(inp))
    for i in range(1, m):
        out = inp
        for _ in range(i):  # renamed from m, which shadowed the parameter
            if use_tiny:
                out = DepthwiseConv2D((1, 3), padding='same', data_format='channels_last', activation=None, use_bias=False)(out)
                out = DepthwiseConv2D((3, 1), padding='same', data_format='channels_last', activation=None, use_bias=False)(out)
                out = Conv2D(n, (1, 1), padding='same', data_format='channels_last', activation='relu')(out)
            else:
                out = SeparableConv2D(n, (3, 3), padding='same', data_format='channels_last', activation='relu')(out)
        outs.append(out)
    if merge == 'concat':
        return Concatenate()(outs)
    else:
        return Add()(outs)
Example No. 9
def DW_Conv_BN(x, kernel_size=3, strides=1, padding='same', activation=swish,
               kernel_initializer=CONV_KERNEL_INITIALIZER, kernel_regularizer=KERNEL_REGULIZER):
    x = DepthwiseConv2D(kernel_size, strides=strides, padding=padding, use_bias=False, 
                        depthwise_initializer=kernel_initializer, depthwise_regularizer=kernel_regularizer)(x)
    x = BatchNormalization()(x)
    if activation:
        x = Activation(activation)(x)
    return x
Example No. 10
def create_3d_model(inputs, hyper_params, H, W, CHANNELS):
    # inputs=Input(shape=(40,40,16))
    x = BatchNormalization()(inputs)
    x = DepthwiseConv2D(kernel_size=(3, 3), activation='relu',
                        padding='same')(x)
    x = MaxPooling2D(pool_size=(3, 3))(x)
    x = Flatten()(x)
    return x
Example No. 11
def downsampling_block(x, filters, width, padding='same', activation='relu'):
    x = BatchNormalization(scale=True)(x)
    x = Activation(activation)(x)
    x1 = MaxPooling2D(pool_size=2, strides=2, padding=padding)(x)
    x2 = DepthwiseConv2D(3, depth_multiplier=1, strides=2, padding=padding)(x)
    x = concatenate([x1, x2], axis=3)
    x = Conv2D(filters, 1, strides=1)(x)
    return x
Example No. 12
def xception_block(x, channels):
    x = Activation('relu')(x)
    x = DepthwiseConv2D((3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = DepthwiseConv2D((3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = DepthwiseConv2D((3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    return x
Example No. 13
def build_model():
    inputs = Input(shape=target_dims)
    net = Conv2D(32, kernel_size=3, strides=1, padding="same")(inputs)
    net = LeakyReLU()(net)
    net = Conv2D(32, kernel_size=3, strides=1, padding="same")(net)
    net = LeakyReLU()(net)
    net = Conv2D(32, kernel_size=3, strides=2, padding="same")(net)
    net = LeakyReLU()(net)

    net = Conv2D(32, kernel_size=3, strides=1, padding="same")(net)
    net = LeakyReLU()(net)
    net = Conv2D(32, kernel_size=3, strides=1, padding="same")(net)
    net = LeakyReLU()(net)
    net = Conv2D(32, kernel_size=3, strides=2, padding="same")(net)
    net = LeakyReLU()(net)

    shortcut = net

    net = DepthwiseConv2D(kernel_size=3, strides=1, padding='same', depthwise_initializer='he_normal')(net)  # DepthwiseConv2D takes depthwise_initializer, not kernel_initializer
    net = BatchNormalization(axis=3)(net)
    net = LeakyReLU()(net)
    net = Conv2D(filters=32, kernel_size=1, strides=1, padding='same', kernel_initializer='he_normal')(net)
    net = BatchNormalization(axis=3)(net)
    net = LeakyReLU()(net)

    net = DepthwiseConv2D(kernel_size=3, strides=1, padding='same', depthwise_initializer='he_normal')(net)
    net = BatchNormalization(axis=3)(net)
    net = LeakyReLU()(net)
    net = Conv2D(filters=32, kernel_size=1, strides=1, padding='same', kernel_initializer='he_normal')(net)
    net = BatchNormalization(axis=3)(net)
    net = LeakyReLU()(net)

    net = Add()([net, shortcut])

    net = GlobalAveragePooling2D()(net)
    net = Dropout(0.2)(net)

    net = Dense(128, activation='relu')(net)
    outputs = Dense(num_classes, activation='softmax')(net)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"])

    model.summary()
    return model
Example No. 14
    def _bottleneck(self, filters, strides, t, prev_layers, depth_kernel, alpha, *,
                      res_con = False, first = False, downsample = True):
        
        tk = K.int_shape(prev_layers)[-1] * t
        k_prime = self._make_divisible(int(filters*alpha)) #pointwise filter

        if first:
            first_cv_filter = self._make_divisible(x = 32 * alpha)

            if downsample:
                # was hardcoded to filters = 32, which ignored alpha
                x = self._conv_def(filters = first_cv_filter, kernel_size = 3, strides = 2,
                                   padding = 'valid', prev_layers = prev_layers)
            else:
                x = self._conv_def(filters = first_cv_filter, kernel_size = 3, strides = 1,
                                   padding = 'valid', prev_layers = prev_layers)

            x = self._bn_def(prev_layers = x)
            x = Activation(self._relu6)(x)


        else:
            x = self._conv_def(tk, kernel_size = 1, prev_layers = prev_layers)
            x = self._bn_def(prev_layers = x)
            x = Activation(self._relu6)(x)

        if strides > 1:
            padding = 'valid'
            zpad = self._zero_pad(prev_layers = x)
            dcv = DepthwiseConv2D(kernel_size = depth_kernel, strides = strides, padding = padding, use_bias = False)(zpad)
            
        elif strides == 1:
            padding = 'same'
            dcv = DepthwiseConv2D(kernel_size = depth_kernel, strides = strides,
                               padding = padding, use_bias = False)(x)
        x = self._bn_def(prev_layers = dcv)
        x = Activation(self._relu6)(x)

        x = self._conv_def(k_prime, kernel_size = 1, prev_layers = x)
        x = self._bn_def(prev_layers = x)

        if res_con:
            return Add()([prev_layers, x])
        
        return x
Example No. 15
def DW_Conv_BN(x, kernel_size=3, strides=1, depth_multiplier=1):
    # depth-wise
    x = DepthwiseConv2D(kernel_size,
                        strides=strides,
                        padding='same',
                        depth_multiplier=depth_multiplier)(x)
    x = BatchNormalization()(x)
    x = ReLU(6.)(x)
    return x
Example No. 16
def get_block(x_input, input_channels, output_channels):
    x = Conv2D(input_channels,
               kernel_size=(1, 1),
               padding='same',
               use_bias=False)(x_input)
    x = get_top(x)
    # depthwise convolution operates on each channel separately, cutting the computational cost
    x = DepthwiseConv2D(kernel_size=(1, 3), padding='same', use_bias=False)(x)
    x = get_top(x)
    x = MaxPooling2D(pool_size=(2, 1), strides=(2, 1))(x)
    x = DepthwiseConv2D(kernel_size=(3, 1), padding='same', use_bias=False)(x)
    x = get_top(x)
    x = Conv2D(output_channels,
               kernel_size=(2, 1),
               strides=(1, 2),
               padding='same',
               use_bias=False)(x)
    return x
Example No. 17
    def build(self, input_shape):
        """Creates the layer weights.
        Must be implemented on all layers that have weights.

        Parameters
        ----------

        input_shape: Union[list, tuple, Any]
            Keras tensor (future input to layer) or list/tuple of Keras tensors
            to reference for weight shape computations.
        """

        DepthwiseConv2D.build(self, input_shape)
        self.init_neurons(input_shape)

        if self.config.getboolean('cell', 'bias_relaxation'):
            self.b0 = k.variable(k.get_value(self.bias))
            self.add_update([(self.bias, self.update_b())])
Example No. 18
    def mobilenet_block(x, f, s=1):
        x = DepthwiseConv2D(3, strides=s, padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        x = Conv2D(f, 1, strides=1, padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        return x
Example No. 19
def _inverted_res_block(inputs,
                        filters,
                        expansion,
                        stride,
                        alpha,
                        block_id,
                        use_se=False):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    in_channels = K.int_shape(inputs)[channel_axis]
    pointwise_filters = int(filters * alpha)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    # Expand
    x = Conv2D(expansion * in_channels,
               kernel_size=1,
               padding='same',
               use_bias=False,
               name=prefix + 'expand')(x)
    x = BatchNormalization(axis=channel_axis,
                           epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'expand_BN')(x)
    x = ReLU(6, name=prefix + 'expand_relu')(x)

    # Depthwise
    if stride == 2:
        x = ZeroPadding2D(name=prefix + 'pad')(x)
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        padding='same' if stride == 1 else 'valid',
                        use_bias=False,
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(axis=channel_axis,
                           epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = ReLU(6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               name=prefix + 'project')(x)
    x = BatchNormalization(axis=channel_axis,
                           epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    # Use SE block if needed
    if use_se:
        x = squeeze_excite_block(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])
    return x
Example No. 20
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    prefix = 'block_{}_'.format(block_id)

    in_channels = K.int_shape(inputs)[-1]  # was inputs._keras_shape[-1], removed in modern Keras
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs

    # Expand
    if block_id:
        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   strides=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   kernel_initializer="he_normal",
                   kernel_regularizer=regularizers.l2(4e-5),
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same',
                        depthwise_initializer="he_normal",
                        depthwise_regularizer=regularizers.l2(4e-5),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               strides=1,
               padding='same',
               use_bias=False,
               activation=None,
               kernel_initializer="he_normal",
               kernel_regularizer=regularizers.l2(4e-5),
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])
    return x
Example No. 21
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    in_channels = backend.int_shape(inputs)[-1]
    prefix = 'block_{}_'.format(block_id)

    x = inputs
    pointwise_filters = _make_divisible(int(filters * alpha), 8)
    #---------------------------------#
    #   part 1: a 1x1 convolution expands the channels
    #---------------------------------#
    if block_id:
        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    if stride == 2:
        x = ZeroPadding2D(padding=correct_pad(x, 3), name=prefix + 'pad')(x)

    #---------------------------------#
    #   part 2: a 3x3 depthwise convolution
    #---------------------------------#
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same' if stride == 1 else 'valid',
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    #-----------------------------------------------------------#
    #   part 3: a 1x1 convolution projects the channels back down;
    #   no ReLU here, so the features are not destroyed
    #-----------------------------------------------------------#
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])
    return x
Example No. 22
def aspp(x: ops.Tensor, input_shape: Tuple, out_stride: int) -> ops.Tensor:
    '''
    Build the ASPP layer.
    args:
        x: tensor to operate on
        input_shape: training input shape
        out_stride: output stride (sets the pooling window)
    returns:
        x: operated tensor
    '''
    b0 = Conv2D(256, (1, 1), padding="same", use_bias=False)(x)
    b0 = BatchNormalization()(b0)
    b0 = Activation("relu")(b0)

    b1 = DepthwiseConv2D((3, 3), dilation_rate=(6, 6), padding="same", use_bias=False)(x)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)
    b1 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b1)
    b1 = BatchNormalization()(b1)
    b1 = Activation("relu")(b1)

    b2 = DepthwiseConv2D((3, 3), dilation_rate=(12, 12), padding="same", use_bias=False)(x)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)
    b2 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b2)
    b2 = BatchNormalization()(b2)
    b2 = Activation("relu")(b2)

    b3 = DepthwiseConv2D((3, 3), dilation_rate=(12, 12), padding="same", use_bias=False)(x)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)
    b3 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b3)
    b3 = BatchNormalization()(b3)
    b3 = Activation("relu")(b3)

    out_shape = int(input_shape[0] / out_stride)
    b4 = AveragePooling2D(pool_size=(out_shape, out_shape))(x)
    b4 = Conv2D(256, (1, 1), padding="same", use_bias=False)(b4)
    b4 = BatchNormalization()(b4)
    b4 = Activation("relu")(b4)
    b4 = BilinearUpsampling((out_shape, out_shape))(b4)

    x = Concatenate()([b4, b0, b1, b2, b3])
    return x
Example No. 23
def _bottleneck(inputs, filters, kernel, t, s, r=False):  # bottleneck with a doubled ("2x2") depthwise stage
    """Bottleneck
    This function defines a basic bottleneck structure.
    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.
        r: Boolean, Whether to use the residuals.
    # Returns
        Output tensor.
    """

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t

    x = _conv_block(inputs, tchannel, (1, 1), (1, 1))

    x = DepthwiseConv2D(kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = keras.layers.ReLU(6.)(x)
    # added second ("2x2") depthwise conv; note the stride s is applied again
    x = DepthwiseConv2D(kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = keras.layers.ReLU(6.)(x)
    #x = Activation(relu6)(x)

    x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)

    if r:
        x = add([x, inputs])
    return x
Example No. 24
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    channel_axis = -1 if K.image_data_format() == 'channels_last' else 1
    in_channels = K.int_shape(inputs)[channel_axis]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand the in_channels
        x = Conv2D(expansion * in_channels,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(axis=channel_axis,
                               epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Apply the Depthwise Conv.
    if stride == 2:
        x = ZeroPadding2D(padding=correct_pad(K, x, 3), name=prefix + 'pad')(x)
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same' if stride == 1 else 'valid',
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(axis=channel_axis,
                           epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(axis=channel_axis,
                           epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])

    return x
Example No. 25
def _bottleneck(inputs, filters, kernel, t, s, r=False):
    """Bottleneck
    This function defines a basic bottleneck structure.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.
        r: Boolean, Whether to use the residuals.

    # Returns
        Output tensor.
    """
    global nlay

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # Create expansions layer only if needed (expansion factor >1)
    if t > 1:
        tchannel = K.int_shape(inputs)[channel_axis] * t
        x = _conv_block(inputs, tchannel, (1, 1), (1, 1), use_bias=False)
    else:
        x = inputs

    x = DepthwiseConv2D(kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same',
                        name='expanded_conv_%d_depthwise' % nlay,
                        use_bias=False)(x)
    x = BatchNormalization(
        axis=channel_axis,
        name='expanded_conv_%d_depthwise_batch_normalization' % nlay)(x)
    x = Activation(relu6,
                   name='expanded_conv_%d_depthwise_activation' % nlay)(x)

    x = Conv2D(filters, (1, 1),
               strides=(1, 1),
               padding='same',
               name='expanded_conv_%d_project' % nlay,
               use_bias=False)(x)
    x = BatchNormalization(
        axis=channel_axis,
        name='expanded_conv_%d_project_batch_normalization' % nlay)(x)

    if r:
        x = add([x, inputs], name="expanded_conv_%d_add" % nlay)

    nlay += 1
    return x
Example No. 26
def ghostBottleneck(x,
                    hidden_dim,
                    out_dim,
                    kernel_size=3,
                    ratio=2,
                    strides=1,
                    use_se=False,
                    kernel_initializer='he_normal',
                    kernel_regularizer=None):
    assert strides in [1, 2]
    input_dim = int(x.shape[3])
    hidden = GhostModule(x,
                         hidden_dim,
                         kernel_size=1,
                         ratio=ratio,
                         kernel_initializer=kernel_initializer,
                         kernel_regularizer=kernel_regularizer)
    if strides == 2:
        hidden = DepthwiseConv2D(
            kernel_size,
            strides=2,
            padding='SAME',
            depthwise_regularizer=kernel_regularizer,
            depthwise_initializer=kernel_initializer)(hidden)
    if use_se:
        # pass
        hidden = SELayer(hidden, hidden_dim)
    res = GhostModule(hidden,
                      out_dim,
                      kernel_size=1,
                      ratio=ratio,
                      relu=False,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)
    shortcut = x
    if strides == 2:
        shortcut = DepthwiseConv2D(3, strides=2, padding='SAME')(shortcut)
    if input_dim != out_dim:
        shortcut = Conv2D(out_dim, 1)(shortcut)
        shortcut = BatchNormalization()(shortcut)
    out = Add()([res, shortcut])  # wrap the addition in a Keras layer
    # out = res+shortcut
    return out
Example No. 27
def xception_downsample_block(x, channels, top_relu=False):
    if top_relu:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = DepthwiseConv2D((3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = DepthwiseConv2D((3, 3), strides=(2, 2), padding='same',
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    return x
Example No. 28
def mobile_block(x, f1, f2, s):
    # f1 is unused; the depthwise conv keeps the input channel count
    conv1 = DepthwiseConv2D(kernel_size=[3, 3], strides=[s, s], padding='same', use_bias=False)(x)
    bn1 = BatchNormalization()(conv1)
    relu1 = ReLU()(bn1)

    conv2 = Conv2D(filters=f2, kernel_size=[1, 1], strides=[1, 1], padding='same', use_bias=False)(relu1)
    bn2 = BatchNormalization()(conv2)
    relu2 = ReLU()(bn2)

    return relu2
Example No. 29
 def __init__(self, k_size, H, d, glu_split_dim=-1):
     super(LightConv, self).__init__()  # was super(LightConv).__init__(), which never calls the parent __init__
     self.H = H
     self.d = d
     self.glu_split_dim = glu_split_dim
     self.input_dense = Dense(2 * d)
     # the depthwise kernel is constrained via depthwise_constraint (kernel_constraint is ignored here)
     self.dep_conv = DepthwiseConv2D(kernel_size=(k_size, 1),
                                     depthwise_constraint=SoftmaxConstraint(),
                                     padding='same')
     self.output_dense = Dense(d)
Example No. 30
def _sep_conv_bn(inputs, filters, kernel, strides, depth_activation=False):
    # depthwise
    x = DepthwiseConv2D(kernel, strides=strides, padding='same')(inputs)
    x = BatchNormalization(axis=-1)(x)
    x = ReLU(max_value=6)(x)
    # pointwise
    x = Conv2D(filters, (1, 1), strides=1, padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    x = ReLU(max_value=6)(x)
    return x