Example #1
def inverted_res_block(input_tensor, filters, kernel_size, expansion_factor, width_mul, strides, res=False, act='relu', max_relu_val=None, excite=False, kr=None, group_bn=False, bn_groups=10):
  channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
  # Depth
  tchannel = K.int_shape(input_tensor)[channel_axis] * expansion_factor
  # Width
  cchannel = int(filters * width_mul)

  x = conv_block(input_tensor=input_tensor, filters=tchannel, kernel_size=(1, 1), strides=(1, 1), max_relu_val=max_relu_val, act=act, BN=True)

  x = DepthwiseConv2D(kernel_size, strides=(strides, strides), depth_multiplier=1, padding='same', depthwise_regularizer=kr)(x)
  if group_bn:
    x = GroupNormalization(groups=bn_groups, axis=channel_axis, epsilon=0.1)(x)
  else:
    x = BatchNormalization(axis=channel_axis)(x)
  if act == 'relu':
    x = ReLU(max_value=max_relu_val)(x)

  if excite:
    x = squeeze_excite_block(x)

  x = Conv2D(cchannel, (1, 1), strides=(1, 1), padding='same', kernel_regularizer=kr)(x)
  if group_bn:
    x = GroupNormalization(groups=bn_groups, axis=channel_axis, epsilon=0.1)(x)
  else:
    x = BatchNormalization(axis=channel_axis)(x)

  if res:
    x = Add()([x, input_tensor])

  return x
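Example #1 calls a conv_block helper that is not shown. A minimal sketch consistent with the call site above (Conv2D, optional batch norm, then the chosen activation); the kr default and 'same' padding are assumptions:

def conv_block(input_tensor, filters, kernel_size, strides, max_relu_val=None, act='relu', BN=True, kr=None):
  channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
  x = Conv2D(filters, kernel_size, strides=strides, padding='same', kernel_regularizer=kr)(input_tensor)
  if BN:
    x = BatchNormalization(axis=channel_axis)(x)
  if act == 'relu':
    x = ReLU(max_value=max_relu_val)(x)
  return x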
Example #2
    def __init__(self, kernel_size=3, stride=1):
        super(DepthwiseConv2D_BN, self).__init__()

        self.dconv = DepthwiseConv2D(kernel_size, strides=stride,
                                     depth_multiplier=1,
                                     padding="SAME", use_bias=False)
        self.bn = BatchNormalization(axis=-1, momentum=0.9, epsilon=1e-5)
Example #3
def unit(x, groups, channels, strides):
    y = x

    x = Conv2D(channels // 4,
               kernel_size=1,
               strides=1,
               padding="same",
               groups=groups)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = channel_shuffle(x, groups)

    x = DepthwiseConv2D(kernel_size=3, strides=strides, padding="same")(x)
    x = BatchNormalization()(x)

    if strides == 2:
        channels = channels - y.shape[-1]

    x = Conv2D(channels,
               kernel_size=1,
               strides=1,
               padding="same",
               groups=groups)(x)

    if strides == 1:
        x = Add()([x, y])
    elif strides == 2:
        y = AvgPool2D(pool_size=3, strides=2, padding="same")(y)
        x = Concatenate()([x, y])

    x = Activation('relu')(x)

    return x
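Examples #3 and #15 call a channel_shuffle helper that is not defined here. A minimal sketch of the standard ShuffleNet shuffle (reshape into groups, transpose, flatten back), assuming import tensorflow as tf and channels-last tensors; the groups=2 default matches the argument-free Lambda call in example #15:

def channel_shuffle(x, groups=2):
    _, height, width, channels = x.shape
    x = tf.reshape(x, [-1, height, width, groups, channels // groups])
    x = tf.transpose(x, [0, 1, 2, 4, 3])  # swap the group and channel axes
    return tf.reshape(x, [-1, height, width, channels])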
Example #4
 def __init__(self, numfilt, strides, alpha, expansion, in_channel,
              block_id):
     super(ResBlock, self).__init__()
     prefix = 'block_{}_'.format(block_id)
     self.strides = strides
     self.in_channel = in_channel
     pw_filters = _make_divisible(int(numfilt * alpha), 8)
     self.pw_filters = pw_filters
     self.expand = ConvBlock(expansion * in_channel, 1, 1, 'same', True,
                             alpha, prefix + 'expand')
     self.dwconv = DepthwiseConv2D(
         kernel_size=3,
         strides=strides,
         activation=None,
         padding='same' if strides == 1 else 'valid',
         name=prefix + 'depthwise',
         data_format='channels_last',
         use_bias=False)
     self.bn = BatchNormalization(axis=3,
                                  epsilon=1e-3,
                                  momentum=0.99,
                                  name=prefix + 'depthwise_bn')
     self.relu = ReLU(6., name=prefix + 'depthwise_relu')
     self.pwconv = ConvBlock(pw_filters, 1, 1, 'same', False, alpha,
                             prefix + 'pointwise')
     self.add = Add(name=prefix + 'add')
Example #5
def _depthwise_conv_block(inputs,
                          pointwise_conv_filters,
                          alpha,
                          depth_multiplier=1,
                          strides=(1, 1),
                          block_id=1):

    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    # Depthwise separable convolution
    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id)(inputs)
    x = BatchNormalization(name='conv_dw_%d_bn' % block_id)(x)
    x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

    # 1x1 convolution
    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
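Examples #4, #5, and #16 reference relu6 and _make_divisible without defining them. Minimal sketches of these standard MobileNet helpers; the _make_divisible form follows the widely used TensorFlow reference implementation:

def relu6(x):
    return K.relu(x, max_value=6)

def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Ensure rounding down does not drop more than 10% below the original value.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v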
Example #6
def inception_resnet(nb_classes=3,
                     Chans=64,
                     Samples=321,
                     dropoutRate=0.5,
                     kernLength=64,
                     F1=8,
                     D=2,
                     F2=16,
                     norm_rate=0.25,
                     dropoutType='Dropout',
                     gpu=True):

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(1, Chans, Samples))

    block1 = Conv2D(F1, (1, kernLength),
                    padding='same',
                    input_shape=(1, Chans, Samples),
                    use_bias=False,
                    data_format='channels_first')(input1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.),
                             data_format='channels_first')(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 2), data_format='channels_first')(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16),
                             use_bias=False,
                             padding='same',
                             data_format='channels_first')(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 4), data_format='channels_first')(block2)
    block2 = dropoutType(dropoutRate)(block2)

    block3 = Conv2D(F2, (1, 8), padding='same',
                    data_format='channels_first')(block2)
    block3 = BatchNormalization(axis=1)(block3)
    block3 = Activation('elu')(block3)
    block3 = AveragePooling2D((1, 8), data_format='channels_first')(block3)

    flatten = Flatten(name='flatten')(block3)

    dense = Dense(1, name='out',
                  kernel_constraint=max_norm(norm_rate))(flatten)
    output = Activation('sigmoid', name='sigmoid')(dense)

    return Model(inputs=input1, outputs=output)
Example #7
    def __init__(self,
                 filters,
                 kernel,
                 expansion,
                 strides,
                 squeeze,
                 nl,
                 alpha=1.0,
                 channel_axis=-1):
        super(Bottleneck, self).__init__()
        self.strides = strides
        self.filters = filters
        self.nl = nl
        self.squeeze = squeeze
        if self.squeeze:
            self.squeeze_layer = Squeeze()
        tchannel = int(expansion)
        cchannel = int(alpha * filters)

        self.conv_block = ConvBlock(tchannel,
                                    kernel=(1, 1),
                                    strides=(1, 1),
                                    nl=nl)
        self.dw_conv = DepthwiseConv2D(kernel,
                                       strides=(strides, strides),
                                       depth_multiplier=1,
                                       padding='same')
        self.bn1 = BatchNormalization(axis=channel_axis)
        self.conv2d = Conv2D(cchannel, (1, 1), strides=(1, 1), padding='same')
        self.bn2 = BatchNormalization(axis=channel_axis)
Example #8
def YoloDepthwiseConv2D(*args, **kwargs):
    """Wrapper to set Yolo parameters for DepthwiseConv2D."""
    #yolo_conv_kwargs = {'kernel_regularizer': l2(L2_FACTOR)}
    #yolo_conv_kwargs['bias_regularizer'] = l2(L2_FACTOR)
    #yolo_conv_kwargs.update(kwargs)
    yolo_conv_kwargs = kwargs
    return DepthwiseConv2D(*args, **yolo_conv_kwargs)
Example #9
def _ghost_module(inputs, exp, ratio, block_id, sub_block_id, part, kernel_size=1, dw_size=3, stride=1, relu=True):
    #--------------------------------------------------------------------------#
    #   ratio is normally set to 2 so that the number of output channels
    #   of the block equals exp
    #--------------------------------------------------------------------------#
    output_channels = math.ceil(exp * 1.0 / ratio)

    #--------------------------------------------------------------------------#
    #   Use a 1x1 convolution to shrink the channels of the input feature map,
    #   producing a condensed representation (cross-channel feature extraction)
    #--------------------------------------------------------------------------#
    x = Conv2D(output_channels, kernel_size, strides=stride, padding="same", use_bias=False, kernel_initializer=RandomNormal(stddev=0.02),
            name="blocks."+str(block_id)+"."+str(sub_block_id)+".ghost"+str(part)+".primary_conv.0")(inputs)
    x = BatchNormalization(name="blocks."+str(block_id)+"."+str(sub_block_id)+".ghost"+str(part)+".primary_conv.1")(x)
    if relu:
        x = Activation('relu')(x)

    #--------------------------------------------------------------------------#
    #   After obtaining the condensed features, apply a depthwise convolution
    #   to generate additional feature maps (spatial, per-channel feature extraction)
    #--------------------------------------------------------------------------#
    dw = DepthwiseConv2D(dw_size, 1, padding="same", depth_multiplier=ratio-1, use_bias=False, depthwise_initializer=RandomNormal(stddev=0.02), 
            name="blocks."+str(block_id)+"."+str(sub_block_id)+".ghost"+str(part)+".cheap_operation.0")(x)
    dw = BatchNormalization(name="blocks."+str(block_id)+"."+str(sub_block_id)+".ghost"+str(part)+".cheap_operation.1")(dw)
    if relu:
        dw = Activation('relu')(dw)

    #--------------------------------------------------------------------------#
    #   Stack the 1x1 convolution result with the depthwise convolution result
    #--------------------------------------------------------------------------#
    x = Concatenate(axis=-1)([x, dw])
    x = Lambda(slices, arguments={'n':exp})(x)
    return x
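The Lambda at the end of example #9 trims the concatenated tensor back to exp channels with a slices helper that is not shown; a minimal sketch, assuming channels-last layout:

def slices(x, n):
    return x[..., :n]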
Example #10
def sepconv3x3(neck_channels,
               output_channels,
               stride=(1, 1),
               expansion=1.5,
               decay=0.001):
    return tf.keras.Sequential([
        Conv2D(math.ceil(neck_channels * expansion),
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(decay)),
        BatchNormalization(),
        LeakyReLU(),
        DepthwiseConv2D(kernel_size=(3, 3),
                        padding='same',
                        strides=stride,
                        depthwise_regularizer=l2(decay)),
        BatchNormalization(),
        LeakyReLU(),
        Conv2D(output_channels,
               kernel_size=(1, 1),
               use_bias=False,
               padding='same',
               kernel_regularizer=l2(decay)),
        BatchNormalization(),
        LeakyReLU()
    ])
Example #11
def SepConv(filters, stride=1, kernel_size=3, rate=1, bn_epsilon=1e-3, input=None):

    # Conform to functional API
    if input is None:
        return (lambda x: SepConv(filters, stride=stride, kernel_size=kernel_size, rate=rate, bn_epsilon=bn_epsilon, input=x))

    if stride == 1:
        padding = "same"
        x = input
    else:
        pad_total = (kernel_size + ((kernel_size - 1) * (rate - 1))) - 1
        pad_left = pad_total // 2
        pad_right = pad_total - pad_left
        x = ZeroPadding2D(((pad_left, pad_right), (pad_left, pad_right)))(input)
        padding = "valid"

    # TODO add tx2 support
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate), padding=padding, use_bias=False)(x)
    #x = BatchNormalization(epsilon=bn_epsilon)(x)
    x = Activation("relu")(x)
    x = Conv2D(filters, (1, 1), padding="same", use_bias=False)(x)
    #x = BatchNormalization(epsilon=bn_epsilon)(x)
    x = Activation("relu")(x)

    return x
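Because SepConv returns a closure when input is None, it composes in the usual functional-API style; a hypothetical usage sketch:

x = SepConv(64, stride=2)(x)    # curried form
x = SepConv(64, input=x)        # direct form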
Example #12
def bottleneck(inputs, filters, kernel_size, e, s, squeeze, act_choice):

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    input_shape = K.int_shape(inputs)

    tchannel = int(e)
    cchannel = int(ALPHA * filters)

    r = s == 1 and input_shape[3] == filters  # residual connection only when stride is 1 and the channel count is unchanged

    A1 = conv_block(inputs, tchannel, 1, 1, act_choice)

    Z2 = DepthwiseConv2D(kernel_size,
                         strides=s,
                         depth_multiplier=1,
                         padding='same')(A1)
    Z2 = BatchNormalization(axis=channel_axis)(Z2)
    A2 = activation(Z2, act_choice)

    if squeeze:
        A2 = squeeze_block(A2)

    Z3 = Conv2D(cchannel, 1, strides=1, padding='same')(A2)
    O = BatchNormalization(axis=channel_axis)(Z3)

    if r:
        O = Add()([O, inputs])

    return O
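Example #12 depends on conv_block, activation, and squeeze_block helpers that are not shown (example #1's squeeze_excite_block and example #14's _se_block follow the same squeeze-and-excite pattern, and this conv_block signature differs from example #1's). Hedged sketches consistent with the call sites, assuming the usual tensorflow.keras.layers imports; treating act_choice == 'HS' as hard-swish and anything else as ReLU6 is an assumption:

def activation(x, act_choice='RE'):
    if act_choice == 'HS':  # hard-swish: x * relu6(x + 3) / 6
        return Lambda(lambda t: t * K.relu(t + 3.0, max_value=6.0) / 6.0)(x)
    return ReLU(6.)(x)

def conv_block(inputs, filters, kernel, strides, act_choice):
    x = Conv2D(filters, kernel, strides=strides, padding='same')(inputs)
    x = BatchNormalization(axis=-1)(x)
    return activation(x, act_choice)

def squeeze_block(x):
    channels = K.int_shape(x)[-1]
    s = GlobalAveragePooling2D()(x)
    s = Dense(channels // 4, activation='relu')(s)
    s = Dense(channels, activation='hard_sigmoid')(s)
    return Multiply()([x, Reshape((1, 1, channels))(s)])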
Example #13
def residuals(inputs,
              filters,
              kernel,
              t=1,
              alpha=1.0,
              strides=1,
              use_residual=False):
    '''
    Bottleneck block
    '''

    # Depth
    tchannel = K.int_shape(inputs)[-1] * t
    # Width
    cchannel = int(filters * alpha)

    net = cbr(inputs, tchannel, 1, 1)

    net = DepthwiseConv2D(kernel,
                          strides=strides,
                          depth_multiplier=1,
                          padding='same',
                          use_bias=False)(net)
    net = BatchNormalization()(net)
    net = ReLU(6.)(net)

    net = Conv2D(cchannel, 1, strides=1, padding='same', use_bias=False)(net)
    net = BatchNormalization()(net)

    if use_residual:
        net = Add()([net, inputs])

    return net
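Example #13's cbr helper (convolution, batch norm, ReLU6) is not defined here; a minimal sketch consistent with the call site:

def cbr(inputs, filters, kernel, strides):
    x = Conv2D(filters, kernel, strides=strides, padding='same', use_bias=False)(inputs)
    x = BatchNormalization()(x)
    return ReLU(6.)(x)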
Example #14
def createArch7(input_shape, output_shape, num_stride_layers, use_se):

    input_net = Input(shape=input_shape)
    x = input_net

    x = Conv2D(320,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='same',
               use_bias=False,
               activation=None,
               kernel_initializer="he_normal",
               kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    if num_stride_layers > 0:
        x = DepthwiseConv2D(kernel_size=(3, 3),
                            strides=(2, 2),
                            padding='same',
                            use_bias=False,
                            activation=None,
                            kernel_initializer="he_normal",
                            kernel_regularizer=regularizers.l2(4e-5))(x)
    else:
        x = DepthwiseConv2D(kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            use_bias=False,
                            activation=None,
                            kernel_initializer="he_normal",
                            kernel_regularizer=regularizers.l2(4e-5))(x)
    if use_se:
        x = _se_block(x, filters=320, se_ratio=0.25, prefix="shunt_0_")
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = ReLU(6.)(x)
    x = Conv2D(output_shape[-1],
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='same',
               use_bias=False,
               activation=None,
               kernel_initializer="he_normal",
               kernel_regularizer=regularizers.l2(4e-5))(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999)(x)

    model = Model(inputs=input_net, outputs=x, name='shunt')

    return model
Example #15
def shuffle_unit(inputs, out_channels, bottleneck_ratio, strides=2, stage=1, block=1):
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        raise ValueError('Only channels last supported')

    prefix = 'stage{}/block{}'.format(stage, block)
    bottleneck_channels = int(out_channels * bottleneck_ratio)
    if strides < 2:
        c_hat, c = channel_split(inputs, '{}/spl'.format(prefix))
        inputs = c

    x = Conv2D(bottleneck_channels, kernel_size=(1, 1), strides=1,
               padding='same', name='{}/1x1conv_1'.format(prefix))(inputs)
    x = BatchNormalization(
        axis=bn_axis, name='{}/bn_1x1conv_1'.format(prefix))(x)
    x = Activation('relu', name='{}/relu_1x1conv_1'.format(prefix))(x)
    x = DepthwiseConv2D(kernel_size=3, strides=strides,
                        padding='same', name='{}/3x3dwconv'.format(prefix))(x)
    x = BatchNormalization(
        axis=bn_axis, name='{}/bn_3x3dwconv'.format(prefix))(x)
    x = Conv2D(bottleneck_channels, kernel_size=1, strides=1,
               padding='same', name='{}/1x1conv_2'.format(prefix))(x)
    x = BatchNormalization(
        axis=bn_axis, name='{}/bn_1x1conv_2'.format(prefix))(x)
    x = Activation('relu', name='{}/relu_1x1conv_2'.format(prefix))(x)

    if strides < 2:
        ret = Concatenate(
            axis=bn_axis, name='{}/concat_1'.format(prefix))([x, c_hat])
    else:
        s2 = DepthwiseConv2D(kernel_size=3, strides=2, padding='same',
                             name='{}/3x3dwconv_2'.format(prefix))(inputs)
        s2 = BatchNormalization(
            axis=bn_axis, name='{}/bn_3x3dwconv_2'.format(prefix))(s2)
        s2 = Conv2D(bottleneck_channels, kernel_size=1, strides=1,
                    padding='same', name='{}/1x1_conv_3'.format(prefix))(s2)
        s2 = BatchNormalization(
            axis=bn_axis, name='{}/bn_1x1conv_3'.format(prefix))(s2)
        s2 = Activation('relu', name='{}/relu_1x1conv_3'.format(prefix))(s2)
        ret = Concatenate(
            axis=bn_axis, name='{}/concat_2'.format(prefix))([x, s2])

    ret = Lambda(channel_shuffle,
                 name='{}/channel_shuffle'.format(prefix))(ret)

    return ret
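Example #15 also assumes a channel_split helper; ShuffleNetV2 splits the incoming channels in half, with one half passed through untouched. A minimal sketch, assuming channels-last tensors (the '0'/'1' name suffixes are hypothetical):

def channel_split(x, name=''):
    channels = x.shape[-1] // 2
    c_hat = Lambda(lambda z: z[:, :, :, :channels], name='{}0'.format(name))(x)
    c = Lambda(lambda z: z[:, :, :, channels:], name='{}1'.format(name))(x)
    return c_hat, c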
Example #16
def _inverted_res_block(inputs,
                        expansion,
                        stride,
                        alpha,
                        in_filters,
                        filters,
                        block_id,
                        skip_connection,
                        rate=1):
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_filters,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3,
                        strides=stride,
                        activation=None,
                        use_bias=False,
                        padding='same',
                        dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1,
               padding='same',
               use_bias=False,
               activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3,
                           momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_filters == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
Example #17
File: test.py  Project: RanWX/eeg_project
def EEGNet_SSVEP(nb_classes=12, Chans=8, Samples=256,
                 dropoutRate=0.5, kernLength=256, F1=96,
                 D=1, F2=96, dropoutType='Dropout'):
    """ SSVEP Variant of EEGNet, as used in [1].
    Inputs:

      nb_classes      : int, number of classes to classify
      Chans, Samples  : number of channels and time points in the EEG data
      dropoutRate     : dropout fraction
      kernLength      : length of temporal convolution in first layer
      F1, F2          : number of temporal filters (F1) and number of pointwise
                        filters (F2) to learn.
      D               : number of spatial filters to learn within each temporal
                        convolution.
      dropoutType     : Either SpatialDropout2D or Dropout, passed as a string.


    [1]. Waytowich, N. et al. (2018). Compact Convolutional Neural Networks
    for Classification of Asynchronous Steady-State Visual Evoked Potentials.
    Journal of Neural Engineering vol. 15(6).
    http://iopscience.iop.org/article/10.1088/1741-2552/aae5d8
    """

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(Chans, Samples, 1))

    ##################################################################
    block1 = Conv2D(F1, (1, kernLength), padding='same',
                    input_shape=(Chans, Samples, 1),
                    use_bias=False)(input1)
    block1 = BatchNormalization()(block1)
    block1 = DepthwiseConv2D((Chans, 1), use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(block1)
    block1 = BatchNormalization()(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 4))(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16),
                             use_bias=False, padding='same')(block1)
    block2 = BatchNormalization()(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 8))(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten(name='flatten')(block2)

    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
Example #18
 def layer(x):
     convdw = DepthwiseConv2D(kernel_size=(3,3), padding="same")(x)
     convdw = BatchNormalization()(convdw)
     convdw = ReLU()(convdw)
     conv = Conv2D(filters=filters, kernel_size=(1,1), padding="same")(convdw)
     conv = BatchNormalization()(conv)
     conv = ReLU()(conv)
     return conv
Example #19
    def build(self, input_shape):
        """Creates the layer weights.
        Must be implemented on all layers that have weights.

        Parameters
        ----------

        input_shape: Union[list, tuple, Any]
            Keras tensor (future input to layer) or list/tuple of Keras tensors
            to reference for weight shape computations.
        """

        DepthwiseConv2D.build(self, input_shape)
        self.init_neurons(input_shape.as_list())

        if self.config.getboolean('cell', 'bias_relaxation'):
            self.update_b()
Example #20
def inverted_residual(name, x, inp, filters, stride, expand_ratio, use_batch_norm=True, return_layers = False):
    assert stride in [1, 2]

    hidden_dim = round(inp * expand_ratio)
    use_res_connect = stride == 1 and inp == filters
    orig_x = x
    tensors = []

    if expand_ratio==1:
        x = DepthwiseConv2D(3, strides=stride, padding='same', use_bias=False, name=f"{name}_dwconv")(x)
        tensors.append(x)
        if use_batch_norm:
            x = BatchNormalization(name = f"{name}_bn0")(x)
            tensors.append(x)
        x = activation(x, name = f"{name}_act")
        tensors.append(x)
        x = Conv2D(filters, 1, strides=1, padding='valid', use_bias=False, name = f"{name}_pwlconv")(x)
        tensors.append(x)
        if use_batch_norm:
            x = BatchNormalization(name = f"{name}_bn1")(x)
            tensors.append(x)
    else:
        x = Conv2D(hidden_dim, 1, strides=1, padding='valid', use_bias=False, name = f"{name}_pwconv")(x)
        tensors.append(x)
        if use_batch_norm:
            x = BatchNormalization(name = f"{name}_bn0")(x)
            tensors.append(x)
        x = DepthwiseConv2D(3, strides=stride, padding='same', use_bias=False, name=f"{name}_dwconv")(x)
        tensors.append(x)
        if use_batch_norm:
            x = BatchNormalization(name = f"{name}_bn1")(x)
            tensors.append(x)
        x = activation(x, name = f"{name}_act")
        tensors.append(x)
        x = Conv2D(filters, 1, strides=1, padding='valid', use_bias=False, name = f"{name}_pwlconv")(x)
        tensors.append(x)
        if use_batch_norm:
            x = BatchNormalization(name = f"{name}_bn2")(x)
            tensors.append(x)

    if use_res_connect:
        x = Add(name=f"{name}_add")([x, orig_x])

    if return_layers:
        return x, tensors
    return x
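Example #20 applies its nonlinearity through an activation(x, name) helper that is not shown; a minimal sketch, where the MobileNetV2-style ReLU6 is an assumption:

def activation(x, name=None):
    return ReLU(6., name=name)(x)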
Example #21
    def call(self, x, training=None):

        G = len(self.filters)
        y = []
        for xi, fi in zip(tf.split(x, G, axis=-1), self.filters):
            o = DepthwiseConv2D(fi, **self.args)(xi)
            y.append(o)
        return Concatenate(axis=-1)(y)
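Note that this call implementation instantiates DepthwiseConv2D and Concatenate on every forward pass, so the depthwise kernels are recreated with fresh weights each time the layer is called; in practice these sublayers would normally be built once in __init__ so their weights persist and can train.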
Example #22
def conv_blocks(x, num_filter, num_iterations=1):
    for num_iter in range(0, num_iterations):
        x = ReLU()(x)
        shortcut = x
        x = DepthwiseConv2D(kernel_size=(3, 3), strides=(1, 1), padding='same', use_bias=True)(x)
        x = Conv2D(num_filter, kernel_size=(1, 1), strides=(1, 1), padding='valid', use_bias=True)(x)
        x = Add()([shortcut, x])
    return x
Example #23
def depthwise_sep_conv(x, filters, alpha, strides=(1, 1)):
    y = DepthwiseConv2D((3, 3), padding='same', strides=strides)(x)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv2D(int(filters * alpha), (1, 1), padding='same')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    return y
Example #24
def depthwise_conv2d(kernel_size, stride=1, activation_fn='linear'):
    return htfe.siso_tensorflow_eager_module_from_tensorflow_op_fn(
        lambda: DepthwiseConv2D(kernel_size,
                                strides=stride,
                                padding='same',
                                activation=activation_fn,
                                use_bias=False), {},
        name="DepthwiseConv2D_%dx%d" % (kernel_size, kernel_size))
Example #25
def __global_depthwise_block(_inputs):
    input_shape = K.int_shape(_inputs)
    assert input_shape[1] == input_shape[2]
    kernel_size = input_shape[1]
    x = DepthwiseConv2D((kernel_size, kernel_size),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='valid')(_inputs)
    return x
Example #26
def downsampling_block(x, filters, width, padding='same', activation='relu'):
    x = BatchNormalization(scale=True)(x)
    x = Activation(activation)(x)
    x1 = MaxPooling2D(pool_size=2, strides=2, padding=padding)(x)
    x2 = DepthwiseConv2D(3, depth_multiplier=1, strides=2, padding=padding)(x)
    x = concatenate([x1, x2], axis=3)
    x = Conv2D(filters, 1, strides=1)(x)
    return x
Example #27
    def _SepConv_BN(self,
                    x,
                    filters,
                    prefix,
                    stride=1,
                    kernel_size=3,
                    rate=1,
                    depth_activation=False,
                    point_bn=True,
                    point_activation=False,
                    epsilon=1e-3):
        """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
            Implements right "same" padding for even kernel sizes
            Args:
                x: input tensor
                filters: num of filters in pointwise convolution
                prefix: prefix before name
                stride: stride at depthwise conv
                kernel_size: kernel size for depthwise convolution
                rate: atrous rate for depthwise convolution
                depth_activation: flag to use activation between depthwise & poinwise convs
                epsilon: epsilon to use in BN layer
        """

        if stride == 1:
            depth_padding = 'same'
        else:
            kernel_size_effective = kernel_size + (kernel_size - 1) * (
                rate - 1)  # effective kernel size once dilation is applied
            pad_total = kernel_size_effective - 1  # total padding for the feature map
            pad_beg = pad_total // 2
            pad_end = pad_total - pad_beg
            x = ZeroPadding2D(((pad_beg, pad_end), (pad_beg, pad_end)))(x)
            depth_padding = 'valid'

        if not depth_activation:
            x = Activation('relu')(x)
        # stride != 1 is incompatible with dilation_rate != 1
        x = DepthwiseConv2D((kernel_size, kernel_size),
                            strides=(stride, stride),
                            dilation_rate=(rate, rate),
                            padding=depth_padding,
                            use_bias=False,
                            name=prefix + '_depthwise')(x)
        x = BatchNormalization(name=prefix + '_depthwise_BN',
                               epsilon=epsilon)(x)
        x = Activation('relu')(x)
        x = Conv2D(filters, (1, 1),
                   padding='same',
                   use_bias=False,
                   name=prefix + '_pointwise')(x)
        if point_bn:
            x = BatchNormalization(name=prefix + '_pointwise_BN',
                                   epsilon=epsilon)(x)
        if point_activation:
            x = Activation('relu')(x)

        return x
Example #28
def SepConv_BN(x,
               filters,
               prefix="aspp",
               stride=1,
               kernel_size=3,
               rate=1,
               depth_activation=False,
               epsilon=1e-3,
               momentum=0.999):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
        Implements right "same" padding for even kernel sizes
        Args:
            x: input tensor
            filters: num of filters in pointwise convolution
            prefix: prefix before name
            stride: stride at depthwise conv
            kernel_size: kernel size for depthwise convolution
            rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
            epsilon: epsilon to use in BN layer
        
        https://github.com/bonlime/keras-deeplab-v3-plus
    """

    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D(((pad_beg, pad_end), (pad_beg, pad_end)))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation(tf.nn.relu)(x)
    x = DepthwiseConv2D((kernel_size, kernel_size),
                        strides=(stride, stride),
                        dilation_rate=(rate, rate),
                        padding=depth_padding,
                        use_bias=False,
                        name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN',
                           epsilon=epsilon,
                           momentum=momentum)(x)
    if depth_activation:
        x = Activation(tf.nn.relu)(x)
    x = Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN',
                           epsilon=epsilon,
                           momentum=momentum)(x)
    if depth_activation:
        x = Activation(tf.nn.relu)(x)

    return x
Example #29
def encode_bottleneck(x,
                      proj_ch,
                      out_ch,
                      strides=1,
                      dilation=1,
                      separable=True,
                      depthwise=True,
                      preluop=False,
                      pool=False):
    x = PReLU(shared_axes=[1, 2])(x)
    y = Conv2D(filters=proj_ch,
               kernel_size=strides,
               strides=strides,
               padding='same')(x)
    y = PReLU(shared_axes=[1, 2])(y)

    if separable:

        if depthwise:
            y = SeparableConv2D(filters=proj_ch,
                                kernel_size=3,
                                strides=1,
                                padding='same')(y)
            y = PReLU(shared_axes=[1, 2])(y)
            y = DepthwiseConv2D(kernel_size=3, padding='same')(y)
        else:
            y = SeparableConv2D(filters=proj_ch,
                                kernel_size=5,
                                strides=1,
                                padding='same')(y)
    else:
        y = Conv2D(filters=out_ch,
                   kernel_size=3,
                   dilation_rate=dilation,
                   strides=1,
                   padding='same')(y)

    y = PReLU(shared_axes=[1, 2])(y)
    y = Conv2D(filters=out_ch, kernel_size=1, strides=1, padding='same')(y)

    if pool:
        m = MaxPool2D((2, 2), padding='same')(x)
        if m.shape[-1] != 128:
            x = Conv2D(filters=out_ch,
                       kernel_size=1,
                       strides=1,
                       padding='same')(m)
        else:
            x = m
        z = Add()([x, y])
        return z, m

    z = Add()([x, y])

    if preluop:
        return z, x

    return z
Example #30
def Sep_CONV_stack(X,
                   channel,
                   kernel_size=3,
                   stack_num=1,
                   dilation_rate=1,
                   activation='ReLU',
                   batch_norm=False,
                   name='sep_conv'):
    '''
    Depthwise separable convolution with 
    (optional) dilated convolution kernel and batch normalization.
    
    Input
    ----------
        X: input tensor
        channel: number of convolution filters
        kernel_size: size of 2-d convolution kernels
        stack_num: number of stacked depthwise-pointwise layers
        dilation_rate: dilation rate of the depthwise convolution kernel
        activation: name of a `tensorflow.keras.layers` activation layer, e.g., 'ReLU'
        batch_norm: True for batch normalization, False otherwise.
        name: name of the created keras layers
    Output
    ----------
        X: output tensor
    
    '''

    # Resolve the activation layer class from its name; the named
    # tensorflow.keras.layers class (e.g. ReLU) must be imported in scope.
    activation_func = eval(activation)
    bias_flag = not batch_norm

    for i in range(stack_num):
        X = DepthwiseConv2D(kernel_size,
                            dilation_rate=dilation_rate,
                            padding='same',
                            use_bias=bias_flag,
                            name='{}_{}_depthwise'.format(name, i))(X)

        if batch_norm:
            X = BatchNormalization(
                name='{}_{}_depthwise_BN'.format(name, i))(X)

        X = activation_func(
            name='{}_{}_depthwise_activation'.format(name, i))(X)

        X = Conv2D(channel, (1, 1),
                   padding='same',
                   use_bias=bias_flag,
                   name='{}_{}_pointwise'.format(name, i))(X)

        if batch_norm:
            X = BatchNormalization(
                name='{}_{}_pointwise_BN'.format(name, i))(X)

        X = activation_func(
            name='{}_{}_pointwise_activation'.format(name, i))(X)

    return X
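A hypothetical usage of the stack above, assuming a channels-last input tensor X and that the ReLU layer class is imported:

X = Sep_CONV_stack(X, channel=64, kernel_size=3, stack_num=2,
                   activation='ReLU', batch_norm=True, name='sep_block1')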