Code example #1
File: mobilenet.py Project: MrKamiZhou/EAST
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    in_channels = backend.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = layers.Conv2D(expansion * in_channels,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          activation=None,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'expand_BN')(x)
        x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    if stride == 2:
        x = layers.ZeroPadding2D(padding=correct_pad(backend, x, 3),
                                 name=prefix + 'pad')(x)
    x = layers.DepthwiseConv2D(kernel_size=3,
                               strides=stride,
                               activation=None,
                               use_bias=False,
                               padding='same' if stride == 1 else 'valid',
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'depthwise_BN')(x)

    x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = layers.Conv2D(pointwise_filters,
                      kernel_size=1,
                      padding='same',
                      use_bias=False,
                      activation=None,
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=prefix + 'add')([inputs, x])
    return x
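The snippet relies on two helpers that are not shown, `_make_divisible` and `_correct_pad`-style padding; both match the implementations in keras_applications. For reference, a minimal sketch of each:

def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, never dropping
    # more than 10% below the original value.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

def correct_pad(backend, inputs, kernel_size):
    # Compute the asymmetric ZeroPadding2D tuple that makes a strided
    # 'valid' convolution line up with 'same' output sizes.
    img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
    input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]))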
Code example #2
 def test_tiny_depthwise_conv_valid_pad_depth_multiplier(self):
     input_dim = 16
     input_shape = (input_dim, input_dim, 3)
     depth_multiplier = 2
     kernel_height = 3
     kernel_width = 3
     model = tf.keras.Sequential()
     model.add(layers.DepthwiseConv2D(
         depth_multiplier=depth_multiplier,
         kernel_size=(kernel_height, kernel_width),
         input_shape=input_shape, padding='valid', strides=(1, 1)))
     model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
     self._test_model(model)
Code example #3
def depthwiseConv(kernel_size,
                  strides=1,
                  depth_multiplier=1,
                  dilation_rate=1,
                  use_bias=False):
    return layers.DepthwiseConv2D(
        kernel_size,
        strides=strides,
        depth_multiplier=depth_multiplier,
        padding='same',
        use_bias=use_bias,
        kernel_regularizer=regularizers.l2(l=0.0003),
        dilation_rate=dilation_rate)
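The factory returns an unapplied layer, so it composes naturally in a functional model. A hypothetical usage sketch (input shape chosen arbitrarily, assuming the usual tf.keras imports):

inputs = tf.keras.Input(shape=(32, 32, 16))
# depthwise 3x3 with stride 2: spatial downsampling, no channel mixing
x = depthwiseConv(kernel_size=3, strides=2)(inputs)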
Code example #4
def get_model(conv1, conv2, conv3, conv4):
    # `shape` and `num_labels` are expected to be defined in the enclosing scope
    model = models.Sequential([
      layers.Input(shape=shape),
      layers.Conv2D(filters=conv1, kernel_size=[3,3], strides=[2,1], use_bias=False),
      layers.BatchNormalization(momentum=0.1),
      layers.ReLU(),
      layers.DepthwiseConv2D(kernel_size=[3,3], strides=[1,1], use_bias=False),
      layers.Conv2D(filters=conv2, kernel_size=[1,1], strides=[1,1], use_bias=False),
      layers.BatchNormalization(momentum=0.1),
      layers.ReLU(),
      layers.DepthwiseConv2D(kernel_size=[3,3], strides=[1,1], use_bias=False),
      layers.Conv2D(filters=conv3, kernel_size=[1,1], strides=[1,1], use_bias=False),
      layers.BatchNormalization(momentum=0.1),
      layers.ReLU(),
      layers.DepthwiseConv2D(kernel_size=[3,3], strides=[1,1], use_bias=False),
      layers.Conv2D(filters=conv4, kernel_size=[1,1], strides=[1,1], use_bias=False),
      layers.BatchNormalization(momentum=0.1),
      layers.ReLU(),
      layers.GlobalAveragePooling2D(),
      layers.Dense(num_labels)
    ])
    return model 
Code example #5
def _depthwise_conv_block(inputs, pointwise_conv_filters, params,
                          strides=(1, 1), dropout=2**-6, block_id=1):
    """Adds a depth-wise convolution block."""

    p = params
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * p.alpha)

    if p.activity_regularizer is not None:
        activity_regularizer = regularizers.l2(p.activity_regularizer)
    else:
        activity_regularizer = None

    if p.max_value is not None:
        max_constraint = MaxConstraint(p.max_value)
    else:
        max_constraint = None

    # DepthwiseConv2D takes depthwise_* (not kernel_*) regularizer/constraint
    # arguments; the kernel_* names it was given would be silently ignored
    x = layers.DepthwiseConv2D((3, 3),
                               padding='same',
                               strides=strides,
                               use_bias=p.use_bias,
                               activity_regularizer=activity_regularizer,
                               depthwise_regularizer=regularizers.l2(p.weight_regularizer),
                               depthwise_constraint=max_constraint,
                               bias_constraint=max_constraint,
                               name='conv_dw_%d' % block_id)(inputs)
    x = layers.BatchNormalization(axis=channel_axis,
                                  beta_constraint=max_constraint,
                                  gamma_constraint=max_constraint,
                                  name='conv_dw_%d_bn' % block_id)(x)
    x = layers.ReLU(max_value=p.max_value, name='conv_dw_%d_relu' % block_id)(x)

    x = layers.Conv2D(pointwise_conv_filters, (1, 1),
                      padding='same',
                      use_bias=p.use_bias,
                      strides=(1, 1),
                      activity_regularizer=activity_regularizer,
                      kernel_regularizer=regularizers.l2(p.weight_regularizer),
                      kernel_constraint=max_constraint,
                      bias_constraint=max_constraint,
                      name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  beta_constraint=max_constraint,
                                  gamma_constraint=max_constraint,
                                  name='conv_pw_%d_bn' % block_id)(x)
    x = layers.ReLU(max_value=p.max_value, name='conv_pw_%d_relu' % block_id)(x)
    if dropout > 0:
        x = layers.Dropout(dropout)(x)

    return x
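MaxConstraint is project-specific and not shown. A plausible stand-in, assuming it simply clips weights into [-max_value, max_value] (the project's real class may differ):

import tensorflow as tf

class MaxConstraint(tf.keras.constraints.Constraint):
    """Clip every weight into [-max_value, max_value] after each update."""

    def __init__(self, max_value):
        self.max_value = max_value

    def __call__(self, w):
        return tf.clip_by_value(w, -self.max_value, self.max_value)

    def get_config(self):
        return {'max_value': self.max_value}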
Code example #6
def _depth_wise_conv(inputs,
                     kernel_size,
                     strides,
                     data_format,
                     name="_depth_wise_conv"):
    x = layers.DepthwiseConv2D(
        kernel_size,  # [kernel_size,kernel_size] filter
        strides=strides,
        depthwise_initializer=CONV_KERNEL_INITIALIZER,
        padding='same',
        data_format=data_format,
        name="depth_wise_conv" + name,
        use_bias=False)(inputs)
    return x
Code example #7
def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio,
                        activation, block_id):
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    shortcut = x
    prefix = 'expanded_conv/'
    infilters = backend.int_shape(x)[channel_axis]
    if block_id:
        # Expand
        prefix = 'expanded_conv_{}/'.format(block_id)
        x = layers.Conv2D(_depth(infilters * expansion),
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'expand/BatchNorm')(x)
        x = layers.Activation(activation)(x)

    if stride == 2:
        x = layers.ZeroPadding2D(padding=correct_pad(backend, x, kernel_size),
                                 name=prefix + 'depthwise/pad')(x)
    x = layers.DepthwiseConv2D(kernel_size,
                               strides=stride,
                               padding='same' if stride == 1 else 'valid',
                               use_bias=False,
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'depthwise/BatchNorm')(x)
    x = layers.Activation(activation)(x)

    if se_ratio:
        x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)

    x = layers.Conv2D(filters,
                      kernel_size=1,
                      padding='same',
                      use_bias=False,
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'project/BatchNorm')(x)

    if stride == 1 and infilters == filters:
        x = layers.Add(name=prefix + 'Add')([shortcut, x])
    return x
Code example #8
    def __init__(self, n_filters, k_size=3, stride=1, d_rate=1, activation=False):
        super(SeparableConvBlock, self).__init__()
        pad_type = "same" if stride == 1 else "valid"

        self.blocks = [
            custom_pad(k_size, d_rate) if stride > 1 else None,
            layers.ReLU() if activation is False else None,
            layers.DepthwiseConv2D(k_size, stride, pad_type, use_bias=False, dilation_rate=d_rate),
            layers.BatchNormalization(),
            layers.ReLU() if activation is True else None,
            layers.Conv2D(n_filters, 1, 1, "same", use_bias=False),
            layers.BatchNormalization(),
            layers.ReLU() if activation is True else None
        ]
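Note that the list deliberately mixes layers with None placeholders; the class's call method (not shown) presumably skips them. A minimal sketch of what that might look like:

    def call(self, x):
        # apply only the layers that were actually instantiated,
        # skipping the None placeholders (training-flag plumbing omitted)
        for layer in self.blocks:
            if layer is not None:
                x = layer(x)
        return x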
Code example #9
    def __init__(self, channels, kernel_size, strides):
        super(SeparableConv2D, self).__init__()

        self.channels = channels
        self.kernel_size = kernel_size
        self.strides = strides

        self.l2_regularizer_00004 = regularizers.l2(0.00004)
        # depthwise_regularizer (not kernel_regularizer) applies to the depthwise kernel
        self.dw_conv = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=strides, padding="SAME",
                                              depthwise_regularizer=self.l2_regularizer_00004)
        self.conv = layers.Conv2D(filters=channels, kernel_size=(1, 1), strides=(1, 1), padding='SAME')
        self.relu = layers.ReLU()
        self.relu6 = layers.ReLU(max_value=6)
        self.bn = layers.BatchNormalization(momentum=0.999)
Code example #10
def _depthwise_conv2d_bn(x,
                         filters,
                         kernel_size=(3, 3),
                         padding='same',
                         strides=(1, 1),
                         name=None):
    """Utility function to apply factorized (depthwise & pointwise) conv + BN.

    # Arguments
        x: input tensor.
        filters: number of (pointwise) output channels.
        kernel_size: kernel size of the (depthwise) convolution.
        padding: padding mode of the depthwise convolution.
        strides: strides of the (depthwise) convolution.
        name: name of the ops; will become
              `name + '_dw_conv'` for the depthwise convolution,
              `name + '_dw_bn'` for the depthwise batch norm layer,
              `name + '_dw_relu'` for the depthwise relu layer,
              `name + '_pw_conv'` for the pointwise convolution,
              `name + '_pw_bn'` for the pointwise batch norm layer,

    # Returns
        Output tensor after applying the factorized conv + BN.
    """
    if name is not None:
        dw_conv_name = name + '_dw_conv'
        dw_bn_name = name + '_dw_bn'
        dw_relu_name = name + '_dw_relu'
        pw_conv_name = name + '_pw_conv'
        pw_bn_name = name + '_pw_bn'
    else:
        dw_conv_name, dw_bn_name, dw_relu_name = None, None, None
        pw_conv_name, pw_bn_name = None, None
    bn_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.DepthwiseConv2D(kernel_size=kernel_size,
                               strides=strides,
                               padding=padding,
                               use_bias=False,
                               name=dw_conv_name)(x)
    x = layers.BatchNormalization(axis=bn_axis, name=dw_bn_name)(x)
    x = layers.Activation('relu', name=dw_relu_name)(x)
    x = layers.Conv2D(filters=filters,
                      kernel_size=(1, 1),
                      strides=(1, 1),
                      padding='same',
                      use_bias=False,
                      name=pw_conv_name)(x)
    x = layers.BatchNormalization(axis=bn_axis, name=pw_bn_name)(x)
    x = layers.Activation('relu', name=name)(x)
    return x
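A quick usage sketch, chaining two of these blocks in a functional model (shapes and names are illustrative only, assuming the usual tf.keras imports):

inputs = layers.Input(shape=(224, 224, 3))
x = _depthwise_conv2d_bn(inputs, filters=32, strides=(2, 2), name='block1')
x = _depthwise_conv2d_bn(x, filters=64, name='block2')
model = tf.keras.Model(inputs, x)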
Code example #11
File: CAFA.py Project: junhahyung/paf
        def branch_block(net, depth_ksize=3, depth_strides=2, conv_filters=16, conv_ksize=1, conv_strides=1, pad=True):
            # depthwise_regularizer (not kernel_regularizer) applies to the depthwise kernel
            branch_1 = layers.DepthwiseConv2D(kernel_size=depth_ksize, strides=depth_strides, padding='same',
                                       depthwise_regularizer=tf.keras.regularizers.l2(0.01))(net)
            branch_1 = layers.Conv2D(filters=conv_filters, kernel_size=conv_ksize, strides=conv_strides, padding='same',
                              kernel_regularizer=tf.keras.regularizers.l2(0.01))(branch_1)

            branch_2 = layers.MaxPooling2D(pool_size=2)(net)
            if pad:
                # zero-pad channels so branch_2 matches branch_1's conv_filters
                # channels (assumes the input carries conv_filters / 2 channels)
                branch_2 = tf.pad(branch_2, paddings=[[0, 0], [0, 0], [0, 0], [0, int(conv_filters/2)]], mode='CONSTANT', constant_values=0)

            net = layers.Add()([branch_1, branch_2])
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1, 2])(net)

            return net
Code example #12
 def __init__(self,
              kernel_size: int = 3,
              strides: int = 1,
              padding: str = 'same',
              **kwargs):
     super(DWConvBN, self).__init__(**kwargs)
     self.dw_conv = layers.DepthwiseConv2D(
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         use_bias=False,
         # depthwise_regularizer (not kernel_regularizer) applies to the depthwise kernel
         depthwise_regularizer=tf.keras.regularizers.l2(4e-5),
         name="dw1")
     self.bn = layers.BatchNormalization(momentum=0.9, name="bn")
Code example #13
    def __init__(self,
                 in_planes,
                 out_planes,
                 expansion,
                 stride,
                 weight_decay=1e-4):
        super(Block, self).__init__()
        self.stride = stride
        self.in_planes = in_planes
        self.out_planes = out_planes
        planes = in_planes * expansion
        self.conv1 = layers.Conv2D(
            filters=planes,
            kernel_size=1,
            strides=1,
            padding='valid',
            use_bias=False,
            kernel_initializer='he_normal',
            kernel_regularizer=keras.regularizers.l2(weight_decay))
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.DepthwiseConv2D(
            kernel_size=3,
            strides=stride,
            padding='same',
            use_bias=False,
            depthwise_initializer='he_normal',
            depthwise_regularizer=keras.regularizers.l2(weight_decay))
        self.bn2 = layers.BatchNormalization()
        self.conv3 = layers.Conv2D(
            filters=out_planes,
            kernel_size=1,
            strides=1,
            padding='valid',
            use_bias=False,
            kernel_initializer='he_normal',
            kernel_regularizer=keras.regularizers.l2(weight_decay))
        self.bn3 = layers.BatchNormalization()

        if stride == 1 and in_planes != out_planes:
            self.shortcut = keras.Sequential()
            self.shortcut.add(
                layers.Conv2D(
                    filters=out_planes,
                    kernel_size=1,
                    strides=1,
                    padding='valid',
                    use_bias=False,
                    kernel_initializer='he_normal',
                    kernel_regularizer=keras.regularizers.l2(weight_decay)))
            self.shortcut.add(layers.BatchNormalization())
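Only the layers are defined here; the forward pass lives in call, which is not shown. Given how the shortcut is constructed (a 1x1 projection only when stride == 1 and the channel counts differ), it presumably looks roughly like this sketch:

    def call(self, x, training=False):
        out = tf.nn.relu6(self.bn1(self.conv1(x), training=training))
        out = tf.nn.relu6(self.bn2(self.conv2(out), training=training))
        out = self.bn3(self.conv3(out), training=training)
        if self.stride == 1:
            # identity shortcut when channels match, projection otherwise
            shortcut = (self.shortcut(x, training=training)
                        if self.in_planes != self.out_planes else x)
            out = out + shortcut
        return out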
Code example #14
    def __init__(self,
                 block_args,
                 global_params,
                 drop_connect_rate=None,
                 name='mbconvblock',
                 **kwargs):
        super().__init__(name=name, **kwargs)
        batch_norm_momentum = global_params.batch_norm_momentum
        batch_norm_epsilon = global_params.batch_norm_epsilon
        self.has_se = (block_args.se_ratio is not None) and (
            block_args.se_ratio > 0) and (block_args.se_ratio <= 1)

        filters = block_args.input_filters * block_args.expand_ratio
        kernel_size = block_args.kernel_size
        self.block_args = block_args
        self.drop_connect_rate = drop_connect_rate
        self.conv = KL.Conv2D(filters,
                              kernel_size=[1, 1],
                              strides=[1, 1],
                              kernel_initializer=conv_kernel_initializer,
                              padding='same',
                              use_bias=False)
        self.norm = KL.BatchNormalization(axis=-1,
                                          momentum=batch_norm_momentum,
                                          epsilon=batch_norm_epsilon)
        self.act = Swish()  #KL.ReLU()

        self.conv1 = KL.DepthwiseConv2D(
            [kernel_size, kernel_size],
            strides=block_args.strides,
            depthwise_initializer=conv_kernel_initializer,
            padding='same',
            use_bias=False)
        self.norm1 = KL.BatchNormalization(axis=-1,
                                           momentum=batch_norm_momentum,
                                           epsilon=batch_norm_epsilon)
        self.act1 = Swish()  #KL.ReLU()

        self.seblock = SEBlock(block_args, global_params)

        self.conv2 = KL.Conv2D(block_args.output_filters,
                               kernel_size=[1, 1],
                               strides=[1, 1],
                               kernel_initializer=conv_kernel_initializer,
                               padding='same',
                               use_bias=False)
        self.norm2 = KL.BatchNormalization(axis=-1,
                                           momentum=batch_norm_momentum,
                                           epsilon=batch_norm_epsilon)
        self.dropconnect = DropConnect(drop_connect_rate)
Code example #15
File: mobilenetV2.py Project: Xianhua-He/CNN_TF2
 def __init__(self, input_channels, output_channels, expansion_factor,
              stride):
     super(BottleNeck, self).__init__()
     self.stride = stride
     self.input_channels = input_channels
     self.output_channels = output_channels
     self.conv1 = layers.Conv2D(input_channels * expansion_factor, 1, 1,
                                "same")
     self.bn1 = layers.BatchNormalization()
     self.dwconv = layers.DepthwiseConv2D(3, stride, "same")
     self.bn2 = layers.BatchNormalization()
     self.conv2 = layers.Conv2D(output_channels, 1, 1, "same")
     self.bn3 = layers.BatchNormalization()
     self.linear = layers.Activation(tf.keras.activations.linear)
Code example #16
def SeperableConv2d(out_dim, kernel_size, stride):
    sperable_conv = Sequential([

        # Depthwise
        layers.DepthwiseConv2D(kernel_size=kernel_size,
                               strides=(stride, stride),
                               padding='same',
                               activation='relu'),

        # Pointwise
        layers.Conv2D(out_dim, kernel_size=1)
    ])

    return sperable_conv
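This is close to layers.SeparableConv2D, except the ReLU sits between the depthwise and pointwise stages rather than after the fused pair. A hypothetical usage sketch:

# downsample 64-channel features to 128 channels
block = SeperableConv2d(out_dim=128, kernel_size=3, stride=2)
y = block(tf.zeros((1, 56, 56, 64)))  # -> shape (1, 28, 28, 128)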
Code example #17
def conv_block(input_tensor, c, s, t, expand=True):
    """
    Convolutional Block for mobile net v2
    Args:
        input_tensor (keras tensor): input tensor
        c (int): output channels
        s (int): stride size of first layer in the series
        t (int): expansion factor
        expand (bool): expand filters or not?

    Returns: keras tensor
    """
    first_conv_channels = input_tensor.get_shape()[-1]
    if expand:
        x = layers.Conv2D(
            first_conv_channels*t,
            1,
            1,
            padding='same',
            use_bias=False
        )(input_tensor)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU(6.0)(x)
    else:
        x = input_tensor

    x = layers.DepthwiseConv2D(
        3,
        strides=s,
        padding='same',
        depth_multiplier=1,
        use_bias=False
    )(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU(6.0)(x)

    x = layers.Conv2D(
        c,
        1,
        1,
        padding='same',
        use_bias=False
    )(x)
    x = layers.BatchNormalization()(x)

    if input_tensor.get_shape() == x.get_shape() and s == 1:
        return x+input_tensor

    return x
Code example #18
def inverted_res_block(inputs: tf.Tensor, filters: int, expansion: float,
                       kernel_size: int, strides: int, se_ratio: Union[float,
                                                                       None],
                       activation, name: str) -> tf.Tensor:
    input_filters = backend.int_shape(inputs)[CHANNEL_AXIS]
    x = inputs
    if expansion != 1:
        expand_filters = make_divisible(input_filters * expansion)
        x = layers.Conv2D(expand_filters,
                          1,
                          padding='same',
                          use_bias=False,
                          name=f'{name}/expand')(x)
        x = layers.BatchNormalization(axis=CHANNEL_AXIS,
                                      epsilon=1e-3,
                                      momentum=0.999,
                                      name=f'{name}/expand/BatchNorm')(x)
        x = activation(x)
    else:
        expand_filters = input_filters

    x = layers.DepthwiseConv2D(kernel_size,
                               strides=strides,
                               padding='same',
                               use_bias=False,
                               name=f'{name}/depthwise')(x)
    x = layers.BatchNormalization(axis=CHANNEL_AXIS,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=f'{name}/depthwise/BatchNorm')(x)
    x = activation(x)

    if se_ratio:
        x = se_block(x, expand_filters, se_ratio, name)

    x = layers.Conv2D(filters,
                      1,
                      padding='same',
                      use_bias=False,
                      name=f'{name}/project')(x)
    x = layers.BatchNormalization(axis=CHANNEL_AXIS,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name=f'{name}/project/BatchNorm')(x)

    if strides == 1 and input_filters == filters:
        x = layers.Add(name=f'{name}/Add')([inputs, x])

    return x
Code example #19
File: mobile_net.py Project: Runist/YOLOv4
def invertedResidual(inputs, in_channel, out_channel, stride, expand_ratio):
    """
    倒残差结构
    :param inputs: 输入特征层
    :param in_channel: 输入通道数
    :param out_channel: 输出通道数
    :param stride: 步长
    :param expand_ratio: 倍乘因子
    :return: 输出特征层
    """
    if "block_id" not in invertedResidual.__dict__:
        invertedResidual.block_id = 0
    invertedResidual.block_id += 1

    # The expansion ratio determines the channel count in the middle of the inverted residual block
    hidden_channel = in_channel * expand_ratio
    prefix = "Block_{}_".format(invertedResidual.block_id)

    if expand_ratio != 1:
        x = conv_bn_relu(inputs, hidden_channel, kernel_size=1, padding='same', name=prefix + "expand_")
    else:
        x = inputs

    if stride == 2:
        x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)), name=prefix + 'zero_pad')(x)

    # 3x3 depthwise conv
    x = layers.DepthwiseConv2D(kernel_size=3,
                               padding="same" if stride == 1 else 'valid',
                               strides=stride,
                               use_bias=False,
                               name=prefix + 'depthwise_Conv2D')(x)
    x = layers.BatchNormalization(name=prefix + "depthwise_BN")(x)
    x = layers.ReLU(6.0, name=prefix + "depthwise_ReLU")(x)

    # 1x1 pointwise conv(linear)
    x = layers.Conv2D(filters=out_channel,
                      kernel_size=1,
                      strides=1,
                      padding="SAME",
                      use_bias=False,
                      name=prefix + "pointwise_Conv2D")(x)
    x = layers.BatchNormalization(name=prefix + "pointwise_BN")(x)

    # The shortcut is used only when both conditions hold (stride 1, matching channels)
    if stride == 1 and in_channel == out_channel:
        return layers.Add(name=prefix + "add")([inputs, x])

    return x
Code example #20
    def block(inputs):

        if block_args.expand_ratio != 1:
            x = KL.Conv2D(filters,
                          kernel_size=[1, 1],
                          strides=[1, 1],
                          kernel_initializer=ConvKernalInitializer(),
                          padding='same',
                          use_bias=False)(inputs)
            x = KL.BatchNormalization(axis=channel_axis,
                                      momentum=batch_norm_momentum,
                                      epsilon=batch_norm_epsilon)(x)
            x = Swish()(x)
        else:
            x = inputs

        x = KL.DepthwiseConv2D([kernel_size, kernel_size],
                               strides=block_args.strides,
                               depthwise_initializer=ConvKernalInitializer(),
                               padding='same',
                               use_bias=False)(x)
        x = KL.BatchNormalization(axis=channel_axis,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)
        x = Swish()(x)

        if has_se:
            x = SEBlock(block_args, global_params)(x)

        # output phase

        x = KL.Conv2D(block_args.output_filters,
                      kernel_size=[1, 1],
                      strides=[1, 1],
                      kernel_initializer=ConvKernalInitializer(),
                      padding='same',
                      use_bias=False)(x)
        x = KL.BatchNormalization(axis=channel_axis,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)

        if block_args.id_skip:
            if (all(s == 1 for s in block_args.strides)
                    and block_args.input_filters == block_args.output_filters):
                # only apply drop_connect if skip presents.
                if drop_connect_rate:
                    x = DropConnect(drop_connect_rate)(x)
                x = KL.Add()([x, inputs])
        return x
Code example #21
File: net.py Project: zhaoyk1986/keras-portraitnet
 def ConvBloc(input_tensor, n_filters, ksize, strides=1):
     u = L.DepthwiseConv2D(ksize,
                           strides=strides,
                           use_bias=False,
                           padding='same')(input_tensor)
     u = L.BatchNormalization()(u)
     u = L.Activation('relu')(u)
     u = L.Conv2D(n_filters,
                  1,
                  use_bias=False,
                  padding='same')(u)  # inline ReLU dropped: BN + ReLU already follow
     u = L.BatchNormalization()(u)
     u = L.Activation('relu')(u)
     return u
Code example #22
File: MobileNetV2.py Project: pipidog/ImageModelZoo
 def DWBNConv(cls,
              x_in,
              depth_multiplier=1,
              kernel_size=(3, 3),
              strides=(1, 1),
              l2_weight=1e-4):
     # honor the passed-in kernel_size / depth_multiplier (previously hard-coded)
     # and use the depthwise_* argument names DepthwiseConv2D expects
     x = layers.DepthwiseConv2D(kernel_size=kernel_size,
                                strides=strides,
                                depth_multiplier=depth_multiplier,
                                padding='same',
                                depthwise_initializer='he_uniform',
                                depthwise_regularizer=l2(l2_weight))(x_in)
     x = layers.BatchNormalization()(x)
     x = layers.ReLU(max_value=6.0)(x)
     return x
Code example #23
def ARB(inp, fout, dk, dv, nh, kernel, aug=True):
    x = conv(inp, kernel=1, filt=fout * 4, pad='same')
    x = layers.BatchNormalization(axis=-1, fused=True)(x)
    x = layers.Activation('Mish')(x)
    x = layers.DepthwiseConv2D(kernel_size=kernel, strides=1,
                               padding='same')(x)
    x = layers.BatchNormalization(axis=-1, fused=True)(x)
    x = layers.Activation('Mish')(x)
    if aug:
        a = aug_block(x, fout * 4, dk, dv, nh, kernel)
        x = layers.Add()([a, x])
    x = conv(x, kernel=1, filt=fout, pad='same')
    x = layers.BatchNormalization(axis=-1, fused=True)(x)
    x = layers.Activation('Mish')(x)
    return x
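Activation('Mish') only resolves if a custom activation named 'Mish' has been registered; it is not a Keras built-in. One way to register it, assuming the standard Mish definition x * tanh(softplus(x)):

import tensorflow as tf
from tensorflow.keras.utils import get_custom_objects

def mish(x):
    # Mish activation: x * tanh(softplus(x))
    return x * tf.math.tanh(tf.math.softplus(x))

get_custom_objects().update({'Mish': mish})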
Code example #24
    def _inverted_res_block(i, filters, alpha, stride, expansion, block_id):
        prefix = 'block_{}_'.format(block_id)
        in_channels = i.shape[-1]
        x = i

        # Expand
        x = layers.Conv2D(expansion * in_channels,
                          kernel_size=1,
                          padding='valid',
                          use_bias=False,
                          activation=None,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(name=prefix + 'expand_BN')(x)  # epsilon=1e-3, momentum=0.999
        x = layers.ReLU(name=prefix + 'expand_relu')(x)

        # Depthwise
        x = layers.DepthwiseConv2D(kernel_size=3,
                                   strides=stride,
                                   activation=None,
                                   use_bias=False,
                                   padding='same',
                                   name=prefix + 'depthwise')(x)
        x = layers.BatchNormalization(name=prefix + 'depthwise_BN')(x)  # epsilon=1e-3, momentum=0.999
        x = layers.ReLU(name=prefix + 'depthwise_relu')(x)

        # Project
        pointwise_filters = int(filters * alpha)
        x = layers.Conv2D(pointwise_filters,
                          kernel_size=1,
                          padding='valid',
                          use_bias=False,
                          activation=None,
                          name=prefix + 'project')(x)
        x = layers.BatchNormalization(name=prefix + 'project_BN')(x)  # epsilon=1e-3, momentum=0.999

        if stride == 1:
            if in_channels != pointwise_filters:
                i = layers.Conv2D(pointwise_filters,
                                  kernel_size=1,
                                  padding='valid',
                                  use_bias=False,
                                  activation=None,
                                  name=prefix + 'adjust')(i)
            x = layers.Add(name=prefix + 'add')([i, x])
        return x
Code example #25
def _inverted_res_block(inputs, kernel, expansion, alpha, filters, block_id, stride=1):
    in_channels = inputs.shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        x = layers.Conv2D(expansion * in_channels,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          activation=None,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'expand_bn')(x)
        x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    x = layers.DepthwiseConv2D(kernel_size=kernel,
                               strides=stride,
                               activation=None,
                               use_bias=False,
                               padding='same',
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'depthwise_bn')(x)

    x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

    x = layers.Conv2D(pointwise_filters,
                      kernel_size=1,
                      padding='same',
                      use_bias=False,
                      activation=None,
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(
        epsilon=1e-3, momentum=0.999, name=prefix + 'project_bn')(x)

    print(x.name, inputs.shape, x.shape)

    if in_channels == pointwise_filters and stride == 1:
        print("Adding %s" % x.name)
        return layers.Add(name=prefix + 'add')([inputs, x])
    return x
Code example #26
def _inverted_res_block(inputs, filters, alpha, stride, expansion, block_id,
                        skip_connection, rate):
    in_channels = inputs.shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)

    if block_id:
        # Expand
        x = layers.Conv2D(expansion * in_channels,
                          kernel_size=1,
                          padding="same",
                          use_bias=False,
                          kernel_regularizer=kernel_reg,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(momentum=0.999,
                                      name=prefix + 'expand_BN')(x)
        x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = layers.DepthwiseConv2D(kernel_size=3,
                               strides=stride,
                               use_bias=False,
                               padding="same",
                               kernel_regularizer=kernel_reg,
                               dilation_rate=(rate, rate),
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(momentum=0.999,
                                  name=prefix + 'depthwise_BN')(x)
    x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

    # Project
    x = layers.Conv2D(pointwise_filters,
                      kernel_size=1,
                      padding="same",
                      kernel_regularizer=kernel_reg,
                      use_bias=False,
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(momentum=0.999,
                                  name=prefix + 'project_BN')(x)

    if skip_connection:
        return layers.Add(name=prefix + 'add')([inputs, x])

    return x
Code example #27
    def __init__(self, in_channels, out_channels, strides=1, t=6):
        super(LinearBottleNeck, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.strides = strides

        self.residual = Sequential([
            layers.Conv2D(in_channels * t, (1, 1), strides=1, padding='same'),
            layers.BatchNormalization(),
            ReLU6(),
            layers.DepthwiseConv2D((3, 3), strides=strides, padding='same'),
            layers.BatchNormalization(),
            ReLU6(),
            layers.Conv2D(out_channels, (1, 1), strides=1, padding='same'),
            layers.BatchNormalization(),
        ])
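The residual add is not part of the snippet; given the stored strides and channel fields, call presumably applies the identity shortcut only when shapes line up, roughly:

    def call(self, x, training=False):
        out = self.residual(x, training=training)
        # identity shortcut only when spatial size and channels are preserved
        if self.strides == 1 and self.in_channels == self.out_channels:
            out = layers.add([out, x])
        return out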
Code example #28
def InvertedRes_block(x, k_num, k_size, padding_type, name_id):
    x1 = Conv2DNormLReLU(x, k_num * 2, 3, "same", None)
    x2 = layers.DepthwiseConv2D(k_size, strides=(1, 1), padding="same")(x1)
    x2 = InstanceNormalization()(x2)
    x2 = layers.LeakyReLU(alpha=0.2)(x2)

    x3 = layers.Conv2D(k_num,
                       k_size,
                       strides=1,
                       padding="same",
                       use_bias=False,
                       kernel_initializer='he_normal')(x2)
    x3 = InstanceNormalization()(x3)

    y = layers.Add(name=name_id)([x, x3])
    return y
Code example #29
def bn_dw_conv_valid(inputs, k_size, strs, name):
    '''
    The depthwise convolution block, which chains two operations: BN then depthwise conv.
    :param inputs: 4D tensor (batch, height, width, channels), input data
    :param k_size: int, kernel size
    :param strs: int, convolution stride
    :return: 4D tensor.
    '''
    x = layers.BatchNormalization(axis=-1, name='{}_bn'.format(name))(inputs)
    x = layers.DepthwiseConv2D(k_size, (strs, strs),
                               padding='valid',
                               depthwise_regularizer=l2(0.001),
                               use_bias=False,
                               name='{}_dwconv'.format(name))(x)
    return x
Code example #30
    def __init__(self, up_channel_rate, channels, is_subsample, kernel_size):
        super(InvertedBottleneck, self).__init__()

        self.up_channel_rate = up_channel_rate
        self.l2_regularizer_00004 = regularizers.l2(0.00004)
        strides = (2, 2) if is_subsample else (1, 1)
        kernel_size = (kernel_size, kernel_size)
        # depthwise_regularizer (not kernel_regularizer) applies to the depthwise kernel
        self.dw_conv = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=strides, padding="SAME",
                               depthwise_regularizer=self.l2_regularizer_00004)
        self.conv1 = layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), padding='SAME')
        self.conv2 = layers.Conv2D(filters=channels, kernel_size=(1, 1), strides=(1, 1), padding='SAME')

        self.bn1 = layers.BatchNormalization(momentum=0.999)
        self.bn2 = layers.BatchNormalization(momentum=0.999)
        self.relu = layers.ReLU()
        self.relu6 = layers.ReLU(max_value=6)