Example #1
def BottleneckBlock(filters, rate, name, batchnorm=False, input=None):

    # Conform to functional API
    if input is None:
        return (lambda x: BottleneckBlock(
            filters, rate, name, batchnorm=batchnorm, input=x))

    x = input

    name = name + "_"

    # Expand
    x = Conv2D(6 * int(input.shape[-1]),
               kernel_size=1,
               padding="same",
               use_bias=False,
               activation=None,
               name=name + "expand")(x)
    if batchnorm:
        x = BatchNormalization(name=name + "expand_BN", momentum=0.1)(x)
    x = ReLU(max_value=6.0, name=name + "expand_relu")(x)

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3,
                        strides=1,
                        activation=None,
                        use_bias=False,
                        padding="same",
                        dilation_rate=(rate, rate),
                        name=name + "depthwise")(x)
    if batchnorm:
        x = BatchNormalization(name=name + "depthwise_BN", momentum=0.1)(x)
    x = ReLU(max_value=6.0, name=name + "depthwise_relu")(x)

    # Pointwise
    x = Conv2D(filters,
               kernel_size=1,
               padding="same",
               use_bias=False,
               activation=None,
               name=name + "project")(x)
    if batchnorm:
        x = BatchNormalization(name=name + "project_BN", momentum=0.1)(x)

    if filters == input.shape[-1]:
        x = Add(name=name + "add")([input, x])

    return x
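Because BottleneckBlock curries itself when no input is given, it can be used either as a plain call or in the Keras functional style. A minimal usage sketch (assuming the usual tensorflow.keras imports; shapes are illustrative):

inputs = Input(shape=(64, 64, 32))
y = BottleneckBlock(32, rate=2, name="block1", batchnorm=True)(inputs)  # curried, layer-like
y = BottleneckBlock(64, rate=1, name="block2", input=y)                 # direct call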
Example #2
    def depthwise_block(x, strides, init_weights=None, **metaparameters):
        """ Construct a Depthwise Separable Convolution block
            x         : input to the block
            strides   : strides
            n_filters : number of filters
            alpha     : width multiplier
            reg       : kernel regularizer
        """
        n_filters = metaparameters['n_filters']
        alpha = metaparameters['alpha']
        if 'reg' in metaparameters:
            reg = metaparameters['reg']
        else:
            reg = MobileNetV1.reg

        if init_weights is None:
            init_weights = MobileNetV1.init_weights

        # Apply the width filter to the number of feature maps
        filters = int(n_filters * alpha)

        # Strided convolution to match number of filters
        if strides == (2, 2):
            x = ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
            padding = 'valid'
        else:
            padding = 'same'

        # Depthwise Convolution
        x = DepthwiseConv2D((3, 3),
                            strides,
                            padding=padding,
                            use_bias=False,
                            kernel_initializer=init_weights,
                            kernel_regularizer=reg)(x)
        x = BatchNormalization()(x)
        x = ReLU(6.0)(x)

        # Pointwise Convolution
        x = Conv2D(filters, (1, 1),
                   strides=(1, 1),
                   padding='same',
                   use_bias=False,
                   kernel_initializer=init_weights,
                   kernel_regularizer=reg)(x)
        x = BatchNormalization()(x)
        x = ReLU(6.0)(x)
        return x
Example #3
    def _bottleneck(self, inputs, filters, kernel, e, s, squeeze, nl):
        """Bottleneck
        This function defines a basic bottleneck structure.

        # Arguments
            inputs: Tensor, input tensor of conv layer.
            filters: Integer, the dimensionality of the output space.
            kernel: An integer or tuple/list of 2 integers, specifying the
                width and height of the 2D convolution window.
            e: Integer, expansion factor.
                e is always applied to the input size.
            s: An integer or tuple/list of 2 integers, specifying the strides
                of the convolution along the width and height. Can be a single
                integer to specify the same value for all spatial dimensions.
            squeeze: Boolean, whether to use the squeeze-and-excitation block.
            nl: String, nonlinearity activation type.

        # Returns
            Output tensor.
        """

        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
        input_shape = K.int_shape(inputs)

        tchannel = int(e)
        cchannel = int(self.alpha * filters)

        r = s == 1 and input_shape[3] == filters

        x = self._conv_block(inputs, tchannel, (1, 1), (1, 1), nl)

        x = DepthwiseConv2D(kernel,
                            strides=(s, s),
                            depth_multiplier=1,
                            padding='same')(x)
        x = BatchNormalization(axis=channel_axis)(x)
        x = self._return_activation(x, nl)

        if squeeze:
            x = self._squeeze(x)

        x = Conv2D(cchannel, (1, 1), strides=(1, 1), padding='same')(x)
        x = BatchNormalization(axis=channel_axis)(x)

        if r:
            x = Add()([x, inputs])

        return x
Example #4
def cheap_operations(x,
                     output_filters,
                     kernel_size,
                     strides=(1, 1),
                     padding='same',
                     act=True,
                     use_bias=False,
                     name=None):
    x = DepthwiseConv2D(kernel_size=kernel_size,
                        strides=strides,
                        padding=padding,
                        use_bias=use_bias,
                        name=name + '_0')(x)
    x = BatchNormalization(name=name + '_1')(x)
    x = ReLU(name=name + '_relu')(x) if act else x
    return x
Example #5
def _keras_depthwise_conv2d_core(shape=None, data=None):
  assert (shape is None) != (data is None), "pass exactly one of shape or data"
  if shape is None:
    shape = data.shape

  model = Sequential()
  c2d = DepthwiseConv2D((3, 3),
                        data_format="channels_last",
                        use_bias=False,
                        input_shape=shape[1:])
  model.add(c2d)

  if data is None:
    data = np.random.uniform(size=shape)
  out = model.predict(data)
  return model, out
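The helper takes either a shape (random input is generated) or concrete data, but not both. A usage sketch, assuming numpy is imported as np:

model, out = _keras_depthwise_conv2d_core(shape=(1, 8, 8, 3))  # random input
data = np.random.uniform(size=(1, 8, 8, 3))
model, out = _keras_depthwise_conv2d_core(data=data)           # explicit input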
Example #6
    def _sepConv_BN(self, x, filters, stride=1, kernel_size=3, rate=1,
                    epsilon=1e-3, name=''):
        if stride == 1:
            depth_padding = 'same'
        else:
            kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
            pad_total = kernel_size_effective - 1
            pad_beg = pad_total // 2
            pad_end = pad_total - pad_beg
            x = ZeroPadding2D((pad_beg, pad_end))(x)
            depth_padding = 'valid'
        # dilation_rate > 1 makes this a dilated (atrous) depthwise convolution
        x = DepthwiseConv2D((kernel_size, kernel_size),
                            strides=(stride, stride),
                            dilation_rate=(rate, rate),
                            padding=depth_padding,
                            use_bias=False)(x)
        x = BatchNormalization(epsilon=epsilon)(x)
        x = Conv2D(filters, (1, 1), padding='same', use_bias=False)(x)
        x = BatchNormalization(epsilon=epsilon)(x)
        x = Activation('relu')(x)
        return x
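The padding arithmetic keeps the strided, dilated depthwise convolution aligned once padding switches to 'valid'. Worked through for kernel_size=3 and rate=2:

kernel_size_effective = 3 + (3 - 1) * (2 - 1)   # = 5
pad_total = kernel_size_effective - 1           # = 4
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2   # = (2, 2)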
Example #7
def block(inp, out_filters, exp_ratio):
    channel = K.image_data_format()
    if channel == 'channels_last':
        channel_axis = -1
    else:
        channel_axis = 1
    inp_channel = K.int_shape(inp)[channel_axis]
    exp_filter = inp_channel * exp_ratio
    x = Conv2D(exp_filter, (1, 1), padding='same')(inp)
    x = BatchNormalization()(x)
    x = relu6(x)
    x = DepthwiseConv2D((3, 3), padding='same', strides=(2, 2))(x)
    x = relu6(x)
    x = Conv2D(out_filters, (1, 1), padding='same')(x)
    x = linear(x)
    return x
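The snippet assumes relu6 and linear helpers defined elsewhere in the module; a plausible minimal sketch (the bodies here are assumptions, not the original source):

def relu6(x):
    return ReLU(max_value=6.0)(x)

def linear(x):
    return Activation('linear')(x)  # identity activation; keeps the projection linear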
Example #8
    def inverted_residual_block(self,inputs, out_chans, k, t, s, r=False):
        # Expand the depth (number of channels)
        tchannel = K.int_shape(inputs)[-1] * t

        x = self.block(inputs, tchannel, 1, 1)
        # Note the meaning of the depth_multiplier parameter here.
        x = DepthwiseConv2D(k, strides=s, depth_multiplier=1, padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU(max_value=6.0)(x)

        x = Conv2D(out_chans, (1, 1), strides=(1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        if r:
            x = Add()([x, inputs])

        return x
Example #9
def layer_0(inputs, filters):
  channel_axis = -1

  x = Conv2D(filters[0], (3,3), padding='same', strides=(2,2))(inputs)
  #x = BatchNormalization(axis=channel_axis)(x)
  x = ReLU(max_value=6)(x)


  x = DepthwiseConv2D((3,3), strides=(1,1), depth_multiplier=1, use_bias=False, padding='same')(x)
  x = BatchNormalization(axis=channel_axis)(x)
  x = ReLU(max_value=6)(x)

  x = Conv2D(filters[1], (1,1), padding='same', strides=(1,1))(x)
  #x = BatchNormalization(axis=channel_axis)(x)

  return x
Example #10
    def __beta2Layer__(self, inp_x, n, z):
        x = Conv2D(int(n / 4),
                   strides=z,
                   kernel_size=1,
                   kernel_initializer='he_uniform')(inp_x)
        x = BatchNormalization()(x)
        x = self.__swishLayer__(x)
        x = DepthwiseConv2D(kernel_size=3,
                            padding='same',
                            depthwise_initializer='he_uniform')(x)
        x = BatchNormalization()(x)
        x = self.__swishLayer__(x)
        x = Conv2D(n, kernel_size=1, kernel_initializer='he_uniform')(x)
        x = BatchNormalization()(x)
        x = self.__swishLayer__(x)
        return x
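__swishLayer__ is defined elsewhere in the class; a minimal sketch of what such a method typically does (an assumption):

def __swishLayer__(self, x):
    return Activation(tf.nn.swish)(x)  # swish(x) = x * sigmoid(x)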
Example #11
    def sepconv_batchnorm(x,
                          filters,
                          prefix,
                          stride=1,
                          kernel_size=3,
                          rate=1,
                          depth_activation=False,
                          epsilon=1e-3):
        if stride == 1:
            depth_padding = 'same'
        else:
            effective_kernel_size = kernel_size + (kernel_size - 1) * (rate - 1)
            pad = effective_kernel_size - 1
            pad_begin = pad // 2
            pad_end = pad - pad_begin
            x = ZeroPadding2D((pad_begin, pad_end))(x)
            depth_padding = 'valid'

        if not depth_activation:
            x = Activation('relu')(x)

        x = DepthwiseConv2D((kernel_size, kernel_size),
                            strides=(stride, stride),
                            dilation_rate=(rate, rate),
                            padding=depth_padding,
                            use_bias=False,
                            name=prefix + '_depthwise')(x)

        x = BatchNormalization(name=prefix + '_depthwise_batch_norm',
                               epsilon=epsilon)(x)

        if depth_activation:
            x = Activation('relu')(x)

        x = Conv2D(filters, (1, 1),
                   padding='same',
                   use_bias=False,
                   name=prefix + '_pointwise')(x)

        x = BatchNormalization(name=prefix + '_pointwise_batch_norm',
                               epsilon=epsilon)(x)

        if depth_activation:
            x = Activation('relu')(x)

        return x
Example #12
def _inverted_res_block(inputs, expansion,
                        stride, alpha, filters,
                        block_id, skip_connection, rate=1):
    #in_channels = inputs.shape[-1].value  # inputs._keras_shape[-1]
    in_channels = inputs.shape[-1]
    pointwise_conv_filters = int(filters*alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion*in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(tf.nn.relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same',
                        dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = Activation(tf.nn.relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same',
               use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
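_make_divisible is the standard MobileNet channel-rounding helper; its canonical form, shown for reference:

def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, never dropping below 90% of v.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v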
Example #13
    def __init__(self,
                 filters,  # NOTE: will be filters // 2
                 norm_type="instance",
                 pad_type="constant",
                 **kwargs):
        super(BasicShuffleUnitV2, self).__init__(name="BasicShuffleUnitV2")
        filters //= 2
        self.model = tf.keras.models.Sequential([
            Conv2D(filters, 1, use_bias=False),
            get_norm(norm_type),
            ReLU(),
            DepthwiseConv2D(3, padding='same', use_bias=False),
            get_norm(norm_type),
            Conv2D(filters, 1, use_bias=False),
            get_norm(norm_type),
            ReLU(),
        ])
Example #14
    def __init__(self):
        super(GaussianBlur, self).__init__()

        kernel_size = 3  # set the filter size of Gaussian filter
        kernel_weights = np.asarray([[0.03797616, 0.044863533, 0.03797616],
                                     [0.044863533, 0.053, 0.044863533],
                                     [0.03797616, 0.044863533, 0.03797616]])

        in_channels = 3
        kernel_weights = np.expand_dims(kernel_weights, axis=-1)
        kernel_weights = np.repeat(kernel_weights, in_channels, axis=-1)
        kernel_weights = np.expand_dims(kernel_weights, axis=-1)
        self.g_layer = DepthwiseConv2D(kernel_size,
                                       use_bias=False,
                                       padding='same',
                                       weights=[kernel_weights])
        self.g_layer.trainable = False
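Because the kernel is fixed and the layer is frozen, this acts as a constant per-channel Gaussian filter. A usage sketch (calling g_layer directly; the class's own call method is not shown above):

blur = GaussianBlur()
image = tf.random.uniform((1, 64, 64, 3))
blurred = blur.g_layer(image)  # same shape; each channel blurred independently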
Example #15
def TestNet(input_shape=(128, 128, 3), classes=21):

    inputs = Input(shape=input_shape)

    x, x_0, x_1 = encoder(inputs)
    x_0 = Conv2D(classes, (1, 1))(x_0)
    x_1 = Conv2D(classes, (1, 1))(x_1)
    # 16 * 16 * classes

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2DTranspose(classes,
                        kernel_size=(2, 2),
                        strides=(2, 2),
                        use_bias=False)(x)
    x = BatchNormalization(momentum=0.01)(x)
    # 32 * 32 * classes

    x = Concatenate(axis=3)([x, x_1])
    x = BatchNormalization(momentum=0.01)(x)
    x = LeakyReLU(alpha=0.3)(x)

    x = ESP_Module(x, classes, Add_flag=False)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2DTranspose(classes,
                        kernel_size=(2, 2),
                        strides=(2, 2),
                        use_bias=False)(x)
    # 64 * 64 * classes

    x = Concatenate(axis=3)([x, x_0])
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = DepthwiseConv2D((3, 3))(x)
    x = Conv2D(classes, (1, 1))(x)
    x = BatchNormalization(momentum=0.01)(x)
    x = LeakyReLU(alpha=0.3)(x)

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2DTranspose(classes,
                        kernel_size=(2, 2),
                        strides=(2, 2),
                        use_bias=False)(x)
    # 128 * 128 * classes

    outputs = Activation("softmax")(x)
    model = Model(inputs, outputs)
    return model
Example #16
def __bottleneck(inputs, filters_in, filters_out, kernel_size, expansion_coef,
                 se_ratio, stride, dropout_rate):

    # Dimension of the output space after expansion.
    filters_expand = filters_in * expansion_coef

    # Expansion phase.
    if expansion_coef != 1:
        x = Conv2D(filters=filters_expand,
                   kernel_size=1,
                   strides=1,
                   padding='same',
                   kernel_initializer=CONV_KERNEL_INITIALIZER,
                   use_bias=False)(inputs)
        x = BatchNormalization()(x)
        x = Activation(tf.nn.swish)(x)
    else:
        x = inputs

    # Depthwise conv phase.
    x = DepthwiseConv2D(kernel_size=kernel_size,
                        strides=stride,
                        padding='same',
                        depthwise_initializer=CONV_KERNEL_INITIALIZER,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation(tf.nn.swish)(x)

    # Squeeze-and-Excitation phase.
    x = SqueezeExcitation(x, filters_in, filters_expand, se_ratio)

    # Output phase.
    x = Conv2D(filters=filters_out,
               kernel_size=1,
               padding='same',
               kernel_initializer=CONV_KERNEL_INITIALIZER,
               use_bias=False)(x)

    x = BatchNormalization()(x)

    if (stride == 1 and filters_in == filters_out):
        if dropout_rate > 0:
            x = Dropout(dropout_rate)(x)
        x = add([x, inputs])

    return x
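SqueezeExcitation is assumed to be defined elsewhere; a minimal sketch in the usual EfficientNet style, with the signature inferred from the call above:

def SqueezeExcitation(x, filters_in, filters_expand, se_ratio):
    filters_se = max(1, int(filters_in * se_ratio))
    se = GlobalAveragePooling2D()(x)
    se = Reshape((1, 1, filters_expand))(se)
    se = Conv2D(filters_se, 1, padding='same', activation=tf.nn.swish)(se)    # reduce
    se = Conv2D(filters_expand, 1, padding='same', activation='sigmoid')(se)  # excite
    return Multiply()([x, se])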
Example #17
    def build(self, input_shape):
        if tf.keras.backend.image_data_format() == 'channels_last':
            bn_axis = -1
        else:
            bn_axis = 1

        if self.stride == 1:
            depth_padding = 'same'
        else:
            depth_padding = 'valid'

        self.activate = tf.keras.Sequential([Activation(self.activation)])

        # Separable convolution; the 3x3 depthwise uses dilated (atrous) convolution
        self.expand_conv = tf.keras.Sequential([
            DepthwiseConv2D(kernel_size=(self.kernel_size, self.kernel_size),
                            strides=(self.stride, self.stride),
                            dilation_rate=(self.rate, self.rate),
                            padding=depth_padding,
                            use_bias=False,
                            depthwise_initializer=self.kernel_initializer,
                            depthwise_regularizer=self.kernel_regularizer),
            BatchNormalization(axis=bn_axis,
                               momentum=self.batchnorm_momentum,
                               epsilon=self.batchnorm_epsilon)
        ])
        if self.depth_activation:
            self.expand_conv.add(
                tf.keras.Sequential([Activation(self.activation)]))

        # 1x1 convolution to compress the channels
        self.compress_conv = tf.keras.Sequential([
            Conv2D(filters=self.filters,
                   kernel_size=(1, 1),
                   kernel_regularizer=self.kernel_regularizer,
                   kernel_initializer=self.kernel_initializer,
                   strides=(1, 1),
                   padding='same',
                   use_bias=False),
            BatchNormalization(axis=bn_axis,
                               momentum=self.batchnorm_momentum,
                               epsilon=self.batchnorm_epsilon)
        ])
        if self.depth_activation:
            self.compress_conv.add(
                tf.keras.Sequential([Activation(self.activation)]))
Example #18
def bottleneck_block(x, filter=3, channels=64, squeeze=16):
    m = Conv2D(channels, (1, 1),
               data_format="channels_last",
               kernel_regularizer=tf.keras.regularizers.l2(lamb),
               activity_regularizer=tf.keras.regularizers.l2(lamb))(x)
    m = BatchNormalization()(m)
    m = Activation('relu')(m)
    # depthwise (filter x filter) conv; 'same' padding keeps spatial dims for the residual add
    m = DepthwiseConv2D((filter, filter), padding="same",
                        data_format="channels_last")(m)
    m = BatchNormalization()(m)
    m = Activation('relu')(m)
    m = Conv2D(128, (1, 1),
               data_format="channels_last",
               kernel_regularizer=tf.keras.regularizers.l2(lamb),
               activity_regularizer=tf.keras.regularizers.l2(lamb))(m)
    m = BatchNormalization()(m)
    return Add()([m, x])
Example #19
    def shuffle_block(x,
                      n_partitions,
                      n_filters,
                      reduction,
                      init_weights=None):
        ''' Construct a ShuffleNet shuffle block
            x           : input to the block
            n_partitions: number of groups to partition feature maps (channels) into
            n_filters   : number of filters
            reduction   : dimensionality reduction factor (e.g., 0.25)
        '''
        if init_weights is None:
            init_weights = ShuffleNet.init_weights

        # identity shortcut
        shortcut = x

        # pointwise group convolution, with dimensionality reduction
        x = ShuffleNet.pw_group_conv(x,
                                     n_partitions,
                                     int(reduction * n_filters),
                                     init_weights=init_weights)
        x = ReLU()(x)

        # channel shuffle layer
        x = ShuffleNet.channel_shuffle(x, n_partitions)

        # Depthwise 3x3 Convolution
        x = DepthwiseConv2D((3, 3),
                            strides=1,
                            padding='same',
                            use_bias=False,
                            kernel_initializer=init_weights)(x)
        x = BatchNormalization()(x)

        # pointwise group convolution, with dimensionality restoration
        x = ShuffleNet.pw_group_conv(x,
                                     n_partitions,
                                     n_filters,
                                     init_weights=init_weights)

        # Add the identity shortcut (input added to output)
        x = Add()([shortcut, x])
        x = ReLU()(x)
        return x
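ShuffleNet.channel_shuffle is referenced but not shown; the standard implementation reshapes channels into groups, swaps the group axes, and flattens back. A sketch assuming channels-last tensors with static shapes:

def channel_shuffle(x, n_partitions):
    _, h, w, c = x.shape
    x = Reshape((h, w, n_partitions, c // n_partitions))(x)
    x = Permute((1, 2, 4, 3))(x)  # swap group and channels-per-group axes
    return Reshape((h, w, c))(x)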
Example #20
def inverted_residual_block(inputs, expanded_channels, output_channels,
                            strides, block_id):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    prefix = 'mv2_block_{}_'.format(block_id)

    # Expand
    x = Conv2D(expanded_channels,
               1,
               padding='same',
               use_bias=False,
               name=prefix + 'expand')(inputs)
    x = BatchNormalization(axis=channel_axis,
                           momentum=0.1,
                           name=prefix + 'expand_BN')(x)
    x = Activation(swish, name=prefix + 'expand_swish')(x)

    # Depthwise
    if strides == 2:
        x = ZeroPadding2D(padding=correct_pad(K, x, 3), name=prefix + 'pad')(x)

    x = DepthwiseConv2D(kernel_size=3,
                        strides=strides,
                        activation=None,
                        use_bias=False,
                        padding='same' if strides == 1 else 'valid',
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(axis=channel_axis,
                           momentum=0.1,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(swish, name=prefix + 'depthwise_swish')(x)

    # Project
    x = Conv2D(output_channels,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(axis=channel_axis,
                           momentum=0.1,
                           name=prefix + 'project_BN')(x)

    if inputs.shape[-1] == output_channels and strides == 1:
        return Add(name=prefix + 'add')([inputs, x])
    return x
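correct_pad matches the keras_applications utility, which computes the asymmetric zero padding a stride-2 'valid' convolution needs to mimic 'same' behaviour; its standard form, for reference:

def correct_pad(backend, inputs, kernel_size):
    img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
    input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]))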
Example #21
def CNN_submodel_4(INPUTS, l2):
    conv1 = Conv2D(32, (1, 128), padding='same',
                   input_shape=(12, 1024, 1))(INPUTS)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = DepthwiseConv2D((12, 1),
                            depth_multiplier=3,
                            depthwise_constraint=max_norm(1.))(conv1)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Activation('elu')(conv1)
    conv1 = AveragePooling2D((1, 4))(conv1)
    conv1 = Dropout(0.5)(conv1)

    conv2 = SeparableConv2D(64, (1, 16), padding='same')(conv1)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Activation('elu')(conv2)
    conv2 = AveragePooling2D((1, 4))(conv2)
    conv2 = Dropout(0.5)(conv2)

    conv3 = SeparableConv2D(64, (1, 16), padding='same')(conv2)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Activation('elu')(conv3)
    conv3 = AveragePooling2D((1, 4))(conv3)
    conv3 = Dropout(0.5)(conv3)

    conv4 = SeparableConv2D(64, (1, 16), padding='same')(conv3)
    conv4 = BatchNormalization(axis=1)(conv4)
    conv4 = Activation('elu')(conv4)
    conv4 = AveragePooling2D((1, 4))(conv4)
    conv4 = Dropout(0.5)(conv4)

    conv5 = SeparableConv2D(64, (1, 16), padding='same')(conv4)
    conv5 = BatchNormalization(axis=1)(conv5)
    conv5 = Activation('elu')(conv5)
    conv5 = AveragePooling2D((1, 4))(conv5)
    conv5 = Dropout(0.5)(conv5)

    flatten = Flatten()(conv5)

    dense = Dense(16,
                  activation="elu",
                  kernel_regularizer=keras.regularizers.l2(l2))(flatten)
    dense = Dense(2, kernel_constraint=max_norm(0.01))(dense)
    result = Activation('softmax')(dense)

    return Model(inputs=INPUTS, outputs=result)
Example #22
def _bottleneck(x_input,
                filters,
                kernel,
                t,
                s,
                kernel_regulizer,
                dropout_rate,
                base_name,
                depth_multiplier=1,
                r=False):
    """
    """

    t_channels = x_input.shape[3] * t
    x = _conv_block(x_input,
                    t_channels,
                    kernel=(1, 1),
                    stride=(1, 1),
                    kernel_regulizer=kernel_regulizer,
                    dropout_rate=dropout_rate,
                    base_name=base_name + '_expand')
    #     x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    # TODO: depth_multiplier could be made configurable
    x = DepthwiseConv2D(kernel_size=kernel,
                        strides=(s, s),
                        depth_multiplier=1,
                        padding='same',
                        name=base_name + '_depthwise',
                        use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = BatchNormalization(name=base_name + '_depthwise' + '_BN')(x)
    x = Activation(tf.nn.relu6, name=base_name + '_depthwise' + '_relu')(x)
    x = Conv2D(filters, (1, 1),
               strides=(1, 1),
               padding='same',
               name=base_name + '_project',
               use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = BatchNormalization(name=base_name + '_project' + '_BN')(x)
    if r:
        x = add([x, x_input], name=base_name + '_add')
    return x
Example #23
def fake_inception(D, F2, kernLength, Chans, input, dropoutRate, dropoutType):
    block3 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(input)
    block3 = BatchNormalization()(block3)
    block3 = Activation('relu')(block3)
    block3 = AveragePooling2D((1, 8))(block3)
    block3 = dropoutType(dropoutRate)(block3)

    block4 = SeparableConv2D(F2, (1, kernLength),
                             use_bias=False,
                             padding='same')(block3)
    block4 = BatchNormalization()(block4)
    block4 = Activation('relu')(block4)
    block4 = AveragePooling2D((1, 4))(block4)
    block4 = dropoutType(dropoutRate)(block4)
    return Flatten(name='flatten' + str(kernLength))(block4)
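dropoutType is expected to be a layer class such as Dropout or SpatialDropout2D. A usage sketch with EEGNet-style input shapes (the shapes here are assumptions):

inp = Input(shape=(64, 128, 1))  # (Chans, Samples, 1)
feat = fake_inception(D=2, F2=16, kernLength=64, Chans=64, input=inp,
                      dropoutRate=0.5, dropoutType=Dropout)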
Example #24
def bottleneck(inputs, filters, kernel, t, alpha, s, r=False):
  channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
  tchannel = K.int_shape(inputs)[channel_axis] * t
  cchannel = int(filters * alpha)

  x = conv_block(inputs, tchannel, (1, 1), (1, 1))

  x = DepthwiseConv2D(kernel, strides=(s, s), depth_multiplier=1, padding='same')(x)
  x = BatchNormalization(axis=channel_axis)(x)
  x = Activation(relu)(x)

  x = Conv2D(cchannel, (1, 1), strides=(1, 1), padding='same')(x)
  x = BatchNormalization(axis=channel_axis)(x)

  if r:
    x = Add()([x, inputs])

  return x
Example #25
def conv_blocks(x, num_filter, activation='relu', num_iterations=1):
    for num_iter in range(0, num_iterations):
        if activation == 'relu':
            x = ReLU()(x)
        elif activation == 'prelu':
            x = PReLU(alpha_initializer='uniform', shared_axes=[1, 2])(x)
        shortcut = x
        x = DepthwiseConv2D(kernel_size=(5, 5),
                            strides=(1, 1),
                            padding='same',
                            use_bias=True)(x)
        x = Conv2D(num_filter,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='valid',
                   use_bias=True)(x)
        x = Add()([shortcut, x])
    return x
Example #26
def downsample_xception_block(x, channels, top_relu=False):
    if top_relu:
        x = Activation("relu")(x)
    x = SeparableConv2D(channels, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    ##separable conv2
    x = DepthwiseConv2D((3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(channels, (1, 1), padding="same")(x)
    skip = BatchNormalization()(x)
    x = Activation("relu")(skip)

    ##separable conv3
    x = SeparableConv2D(channels, (3, 3), strides=2, padding='same')(x)
    x = BatchNormalization()(x)
    return x, skip
Example #27
def bottleneck(inputs, filters, kernel, t, s, r=False, se=False):
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t
    Z1 = conv_block(inputs, tchannel, 1, 1, "same")
    Z1 = DepthwiseConv2D(kernel,
                         strides=s,
                         padding="same",
                         depth_multiplier=1,
                         use_bias=False)(Z1)
    Z1 = BatchNormalization(axis=channel_axis)(Z1)
    A1 = PReLU(shared_axes=[1, 2])(Z1)
    Z2 = Conv2D(filters, 1, strides=1, padding="same", use_bias=False)(A1)
    Z2 = BatchNormalization(axis=channel_axis)(Z2)
    if se:
        Z2 = se_block(Z2)
    if r:
        Z2 = add([Z2, inputs])
    return Z2
Example #28
def pep_x_module(inputs, block_id, filter_size=[64,64,128]):

    x = Conv2D(filter_size[0], kernel_size=(1,1), padding='valid')(inputs)
    x = BatchNormalization(epsilon=1e-5)(x)
    x = Activation('relu')(x)
    
    x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
    
    x = DepthwiseConv2D(kernel_size=(3,3), strides=(1,1), padding='valid')(x)
    x = BatchNormalization(epsilon=1e-5)(x)
    
    x = Conv2D(filter_size[1], kernel_size=(1,1), padding='valid')(x)
    x = BatchNormalization(epsilon=1e-5)(x)
    x = Activation('relu')(x)
    
    x = Conv2D(filter_size[2], kernel_size=(1,1), padding='valid')(x)

    return x
Example #29
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, kn_size=3, se_connect=False, activation='relu6'):
    in_channels = inputs.shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    if activation == 'hswish':
        act_func = hard_swish
    else:
        act_func = tf.nn.relu6

    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None, kernel_initializer=initializer,
                   name='mobl%d_conv_expand' % block_id)(inputs)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name='bn%d_conv_bn_expand' %
                                    block_id)(x)
        x = Activation(act_func, name='block_%d_expand_relu' % block_id)(x)
    else:
        x = inputs

    # Depthwise
    x = DepthwiseConv2D(kernel_size=kn_size, strides=stride, activation=None,
                        use_bias=False, padding='same', kernel_initializer=initializer,
                        name='mobl%d_conv_depthwise' % block_id)(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name='bn%d_conv_depthwise' % block_id)(x)

    x = Activation(tf.nn.relu6, name='conv_dw_%d_relu' % block_id)(x) if activation == 'relu6' else hard_swish(x)

    if se_connect:
        x = squeeze_excitation_block(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,kernel_initializer=initializer,
               name='mobl%d_conv_project' % block_id)(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name='bn%d_conv_bn_project' % block_id)(x)

    if in_channels == pointwise_filters and stride == 1:
        return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
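hard_swish, initializer and squeeze_excitation_block come from the surrounding module; hard_swish is commonly defined as in MobileNetV3 (a sketch):

def hard_swish(x):
    return x * tf.nn.relu6(x + 3.0) / 6.0  # piecewise-linear approximation of swish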
Example #30
def mobile_facenet(emb_shape=128,
                   input_shape=(112, 112, 3),
                   dropout=1,
                   name="mobile_facenet",
                   weight_file=None,
                   use_se=False,
                   include_top=True):
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    if K.image_data_format() == "channels_first":
        X = Input(shape=(input_shape[-1], input_shape[0], input_shape[1]))
    else:
        X = Input(shape=input_shape)
    M = conv_block(X, 64, 3, 2, "same")  # Output Shape: (56, 56, 64)
    M = separable_conv_block(M, 64, 3, 1)  # (56, 56, 64)
    M = inverted_residual_block(M, 64, 3, t=2, strides=2, n=5,
                                se=use_se)  # (28, 28, 64)
    M = inverted_residual_block(M, 128, 3, t=4, strides=2, n=1,
                                se=use_se)  # (14, 14, 128)
    M = inverted_residual_block(M, 128, 3, t=2, strides=1, n=6,
                                se=use_se)  # (14, 14, 128)
    M = inverted_residual_block(M, 128, 3, t=4, strides=2, n=1,
                                se=use_se)  # (7, 7, 128)
    M = inverted_residual_block(M, 128, 3, t=2, strides=1, n=2,
                                se=use_se)  # (7, 7, 128)
    if include_top:
        """ GDC """
        M = Conv2D(512, 1, use_bias=False)(M)  # (7, 7, 512)
        M = BatchNormalization(axis=channel_axis)(M)
        M = PReLU(shared_axes=[1, 2])(M)
        M = DepthwiseConv2D(int(M.shape[1]),
                            depth_multiplier=1,
                            use_bias=False)(M)  # (1, 1, 512)
        M = BatchNormalization(axis=channel_axis)(M)

        if dropout > 0 and dropout < 1:
            M = Dropout(dropout)(M)
        M = Conv2D(emb_shape, 1, use_bias=False, activation=None)(M)
        M = Flatten()(M)
        M = BatchNormalization(axis=channel_axis, name="embedding")(M)

    model = Model(inputs=X, outputs=M, name=name)
    if weight_file:
        model.load_weights(weight_file)
    return model