Beispiel #1
0
def mobileNetV3_block(input, layer_name, expand_dims, out_dims, kernel, stride, ratio, activation_fn='RE', se=False,
                      short_cut=True):
    """MobileNetV3 bottleneck: 1x1 expand -> depthwise -> (optional SE) -> 1x1 project.

    Args:
        input: 4-D input tensor (NHWC assumed -- TODO confirm).
        layer_name: variable-scope / layer-name prefix for the block.
        expand_dims: channel count after the 1x1 expansion conv.
        out_dims: channel count after the 1x1 projection conv.
        kernel: depthwise kernel size (e.g. [3, 3]).
        stride: depthwise stride.
        ratio: squeeze-and-excite reduction ratio (used only when se=True).
        activation_fn: 'RE' for relu6 or 'HS' for hard swish on the depthwise conv.
        se: apply a squeeze-and-excite module after the depthwise conv.
        short_cut: add the residual connection (only taken when stride == 1).

    Returns:
        Output tensor of the block.

    Raises:
        NotImplementedError: for an unrecognized activation_fn value.
    """
    with tf.variable_scope(layer_name):
        # 1x1 pointwise expansion; the activation comes from the depthwise stage.
        net = slim.convolution2d(input, expand_dims, [1, 1], stride=1, activation_fn=None,
                                 scope=layer_name + '_pw_expand')

        if activation_fn == 'RE':
            net = slim.separable_convolution2d(net, num_outputs=None, stride=stride, activation_fn=relu6,
                                               depth_multiplier=1, kernel_size=kernel, scope=layer_name + '_dwise')
        elif activation_fn == 'HS':
            net = slim.separable_convolution2d(net, num_outputs=None, stride=stride, activation_fn=hard_swish,
                                               depth_multiplier=1, kernel_size=kernel, scope=layer_name + '_dwise')
        else:
            raise NotImplementedError

        if se:
            channel = net.get_shape().as_list()[-1]
            net = squeeze_and_excite(
                net, out_dims=channel, ratio=ratio, layer_name=layer_name + 'se')

        # Linear (activation-free) 1x1 projection back down to out_dims.
        net = slim.convolution2d(net, out_dims, [1, 1], stride=1, activation_fn=None,
                                 scope=layer_name + '_pw_reduce')
        # NOTE(review): residual assumes input channels == out_dims; not checked.
        if stride == 1 and short_cut:
            net = net + input
    return net
Beispiel #2
0
def SepConv(x, C_out, kernel_size, stride):
    """Two stacked separable convs: ReLU, sep-conv (stride), BN, sep-conv, BN."""
    net = tflearn.relu(x)
    in_channels = net.get_shape()[-1].value

    # First separable conv keeps the channel count and applies the stride.
    net = slim.separable_convolution2d(net,
                                       in_channels,
                                       kernel_size,
                                       depth_multiplier=1,
                                       stride=stride)
    net = slim.batch_norm(net)

    # Second separable conv maps to C_out at stride 1.
    net = slim.separable_convolution2d(net, C_out, kernel_size, depth_multiplier=1)
    net = slim.batch_norm(net)
    return net
Beispiel #3
0
def depthwise_separable_conv2(inputs,
                              num_pwc_filters,
                              sc,
                              kernel_size,
                              w_scale_l1,
                              w_scale_l2,
                              b_scale_l1,
                              b_scale_l2,
                              stride):
    """Build a depthwise-separable conv layer: dw conv + BN, then 1x1 conv + BN.

    The *_scale_* parameters are currently unused (their regularizers are
    disabled) but are kept for interface compatibility with callers.
    """
    # num_outputs=None makes slim emit only the depthwise stage; the
    # pointwise projection is done explicitly below.
    dw = slim.separable_convolution2d(inputs,
                                      num_outputs=None,
                                      stride=stride,
                                      depth_multiplier=1,
                                      kernel_size=kernel_size,
                                      scope=sc + '/depthwise_conv')
    dw_bn = slim.batch_norm(dw, scope=sc + '/dw_batch_norm')

    # Explicit 1x1 pointwise convolution.
    pw = slim.convolution2d(dw_bn,
                            num_pwc_filters,
                            kernel_size=[1, 1],
                            scope=sc + '/pointwise_conv')
    return slim.batch_norm(pw, scope=sc + '/pw_batch_norm')
Beispiel #4
0
    def separable_conv(self, input, c_o, k_s, stride, dilation=1, activationFunc=tf.nn.relu6, scope=""):
        """Depthwise conv (relu6) followed by a 1x1 pointwise conv with batch norm.

        NOTE(review): `activationFunc` only reaches the batch-norm arg_scope
        (i.e. the normalizer of the pointwise conv); the depthwise conv
        hard-codes tf.nn.relu6 -- confirm this asymmetry is intended.
        """
        with slim.arg_scope([slim.batch_norm],
                            decay=0.999,
                            fused=True,
                            is_training=self.is4Train,
                            activation_fn=activationFunc):
            # Depthwise stage only: num_outputs=None skips slim's built-in
            # pointwise step; bias disabled.
            output = slim.separable_convolution2d(input,
                                                  num_outputs=None,
                                                  stride=stride,
                                                  trainable=self.is4Train,
                                                  depth_multiplier=opt.depth_multiplier,
                                                  kernel_size=[k_s, k_s],
                                                  rate=dilation,
                                                  weights_initializer=self.init_xavier,
                                                  weights_regularizer=self.l2_regularizer,
                                                  biases_initializer=None,
                                                  activation_fn=tf.nn.relu6,
                                                  scope=scope + '_depthwise')

            # Explicit 1x1 pointwise projection to c_o channels, batch-normalized.
            output = slim.convolution2d(output,
                                        c_o,
                                        stride=1,
                                        kernel_size=[1, 1],
                                        weights_initializer=self.init_xavier,
                                        biases_initializer=self.init_zero,
                                        normalizer_fn=slim.batch_norm,
                                        trainable=self.is4Train,
                                        weights_regularizer=None,
                                        scope=scope + '_pointwise')

        return output
Beispiel #5
0
    def depthwise_separable_conv(self,
                                 preprocessed_inputs,
                                 num_pwc_filters,
                                 width_multiplier,
                                 name,
                                 downsampling=False):
        """Depthwise conv + BN, then an explicit 1x1 pointwise conv + BN.

        Args:
            preprocessed_inputs: 4-D input tensor.
            num_pwc_filters: base pointwise filter count, scaled by width_multiplier.
            width_multiplier: MobileNet width multiplier shrinking the model.
            name: scope prefix for all layers of this block.
            downsampling: use stride 2 on the depthwise conv when True.

        Returns:
            Batch-normalized output of the pointwise convolution.
        """
        # The width multiplier further shrinks the pointwise filter count.
        num_pwc_filters = round(num_pwc_filters * width_multiplier)

        _stride = 2 if downsampling else 1

        # num_outputs=None skips slim's built-in pointwise step; the pointwise
        # projection is done explicitly with a plain 1x1 conv below.
        depthwise_conv = slim.separable_convolution2d(preprocessed_inputs,
                                                      num_outputs=None,
                                                      stride=_stride,
                                                      depth_multiplier=1,
                                                      kernel_size=[3, 3],
                                                      scope=name +
                                                      "/depthwise_conv")

        depthwise_bn = slim.batch_norm(depthwise_conv,
                                       scope=name + "/depthwise_batch_norm")

        # Explicit 1x1 pointwise conv; num_pwc_filters is the output channel count.
        pointwise_conv = slim.convolution2d(depthwise_bn,
                                            num_pwc_filters,
                                            kernel_size=[1, 1],
                                            scope=name + "/pointwise_conv")

        # Fixed: the scope was missing its '/' separator
        # ("...pointwise_batch_norm"), inconsistent with every other scope here.
        pointwise_bn = slim.batch_norm(pointwise_conv,
                                       scope=name + "/pointwise_batch_norm")

        return pointwise_bn
Beispiel #6
0
def depthwise_separable_conv2D(inputs,
                               num_pwc_filters,
                               scope=None,
                               width_multiplier=1,
                               stride=1,
                               rate=1,
                               normalizer_fn=layer_norm,
                               padding=None,
                               slice=None):
    """ Helper function to build the depth-wise separable convolution layer.

    Depthwise 3x3 conv (with dilation `rate`) + layer norm, then a 1x1
    pointwise conv + layer norm.

    NOTE(review): `width_multiplier`, `padding` and `slice` are currently
    unused -- the code paths that used them are commented out. The pointwise
    conv passes rate=2 despite its [1, 1] kernel (dilating a 1x1 kernel is a
    no-op); confirm that argument is intentional.
    """
    # num_pwc_filters = round(num_pwc_filters * width_multiplier)
    _stride = stride
    sc = str(scope)
    # skip pointwise by setting num_outputs=None
    depthwise_conv = slim.separable_convolution2d(inputs,
                                                  num_outputs=None,
                                                  stride=_stride,
                                                  depth_multiplier=1,
                                                  kernel_size=[3, 3],
                                                  rate=rate,
                                                  scope=sc + '/depthwise_conv')
    # depthwise_conv = separable_conv2d(inputs, stride=_stride, depth_multiplier=width_multiplier, kernel=[3, 3], rate=rate, scope=sc+'/depthwise_conv',padding=padding, slice=slice)
    ln = normalizer_fn(depthwise_conv, scope=sc + '/dw_layer_norm')
    # Explicit 1x1 pointwise projection, normalized by `normalizer_fn`.
    pointwise_conv = conv2d(ln,
                            num_pwc_filters, [1, 1],
                            scope=sc + '/pointwise_conv',
                            rate=2,
                            normalizer_fn=None)
    ln = normalizer_fn(pointwise_conv, scope=sc + '/pw_layer_norm')
    return ln
Beispiel #7
0
    def _depthwise_separable_conv(self,
                                  inputs,
                                  num_pwc_filters,
                                  width_multiplier,
                                  sc,
                                  downsample=False):
        """Depthwise conv + BN, then an explicit 1x1 pointwise conv + BN."""
        num_pwc_filters = round(num_pwc_filters * width_multiplier)
        _stride = 2 if downsample else 1

        # num_outputs=None skips slim's built-in pointwise step; the pointwise
        # projection is done explicitly with a 1x1 conv below.
        depthwise_conv = slim.separable_convolution2d(inputs,
                                                      num_outputs=None,
                                                      stride=_stride,
                                                      depth_multiplier=1,
                                                      kernel_size=[3, 3],
                                                      scope=sc +
                                                      '/depthwise_conv')

        bn = slim.batch_norm(depthwise_conv, scope=sc + '/dw_batch_norm')
        # Fixed: the pointwise conv previously consumed `depthwise_conv`,
        # silently discarding the dw batch-norm result computed above.
        pointwise_conv = slim.convolution2d(
            bn,
            num_pwc_filters,  # output channel count of the block
            kernel_size=[1, 1],
            scope=sc + '/pointwise_conv')
        bn = slim.batch_norm(pointwise_conv, scope=sc + '/pw_batch_norm')
        return bn
Beispiel #8
0
  def _depthwise_separable_conv(inputs,
                                num_pwc_filters,
                                width_multiplier,
                                sc,
                                downsample=False):
    """Build a depthwise-separable conv layer: dw conv + BN, then 1x1 conv + BN."""
    filters = round(num_pwc_filters * width_multiplier)
    stride = 2 if downsample else 1

    # Passing num_outputs=None makes slim emit only the depthwise stage.
    net = slim.separable_convolution2d(inputs,
                                       num_outputs=None,
                                       stride=stride,
                                       depth_multiplier=1,
                                       kernel_size=[3, 3],
                                       scope=sc + '/depthwise_conv')
    net = slim.batch_norm(net, scope=sc + '/dw_batch_norm')

    # Explicit pointwise projection to the width-scaled filter count.
    net = slim.convolution2d(net,
                             filters,
                             kernel_size=[1, 1],
                             scope=sc + '/pointwise_conv')
    return slim.batch_norm(net, scope=sc + '/pw_batch_norm')
    def separable_conv(self, input, k_h, k_w, c_o, stride, name, relu=True, set_bias=True):
        """Depthwise conv (linear) followed by a 1x1 pointwise conv with batch norm.

        Args:
            input: 4-D input tensor.
            k_h, k_w: depthwise kernel height and width.
            c_o: output channel count of the pointwise conv.
            stride: depthwise stride.
            name: scope prefix for both layers.
            relu: apply the activation after the pointwise conv.
            set_bias: give the pointwise conv a zero-initialized bias.
        """
        with slim.arg_scope([slim.batch_norm], decay=0.999, fused=common.batchnorm_fused, is_training=self.trainable):
            # Depthwise stage only (num_outputs=None); no activation, no bias.
            output = slim.separable_convolution2d(input,
                                                  num_outputs=None,
                                                  stride=stride,
                                                  trainable=self.trainable,
                                                  depth_multiplier=1.0,
                                                  kernel_size=[k_h, k_w],
                                                  # activation_fn=common.activation_fn if relu else None,
                                                  activation_fn=None,
                                                  # normalizer_fn=slim.batch_norm,
                                                  weights_initializer=_init_xavier,
                                                  # weights_initializer=_init_norm,
                                                  weights_regularizer=_l2_regularizer_00004,
                                                  biases_initializer=None,
                                                  padding=DEFAULT_PADDING,
                                                  scope=name + '_depthwise')

            # Explicit 1x1 pointwise projection, batch-normalized.
            output = slim.convolution2d(output,
                                        c_o,
                                        stride=1,
                                        kernel_size=[1, 1],
                                        activation_fn=common.activation_fn if relu else None,
                                        weights_initializer=_init_xavier,
                                        # weights_initializer=_init_norm,
                                        biases_initializer=_init_zero if set_bias else None,
                                        normalizer_fn=slim.batch_norm,
                                        trainable=self.trainable,
                                        weights_regularizer=None,
                                        scope=name + '_pointwise')

        return output
Beispiel #10
0
    def MobileNetV3_bolck(self,inputs,kernel_size,bottleneck_channels,block_output_channels,stride,h_wish,se_moudle,name,SE_ratio=16):
        """MobileNetV3 bottleneck block: 1x1 expand -> depthwise -> (optional SE)
        -> 1x1 project, plus a residual add when shapes allow.

        ("bolck"/"h_wish"/"se_moudle" are presumably typos for block /
        hard swish / SE module; names kept for caller compatibility.)

        Args:
            inputs: 4-D input tensor.
            kernel_size: depthwise kernel size.
            bottleneck_channels: channels after the 1x1 expansion conv.
            block_output_channels: channels after the 1x1 projection conv.
            stride: depthwise stride.
            h_wish: use hard swish instead of relu6 as the activation.
            se_moudle: apply a squeeze-and-excitation module.
            name: variable-scope name.
            SE_ratio: SE reduction ratio.
        """
        with tf.variable_scope(name):
            # 1x1 expansion conv.
            block_net = slim.convolution2d(inputs,num_outputs=bottleneck_channels,
                                           kernel_size=1,stride=1)
            if h_wish:
                block_net=self.hard_swish(block_net)
            else:
                block_net=tf.nn.relu6(block_net)

            # Depthwise conv (num_outputs=None skips slim's pointwise step).
            block_net=slim.separable_convolution2d(block_net, num_outputs=None,
                                                   kernel_size=kernel_size,stride=stride)

            if h_wish:
                block_net=self.hard_swish(block_net)
            else:
                block_net=tf.nn.relu6(block_net)

            # SE module: squeeze and excitation.
            if se_moudle:
                block_net=self.SE_Moudle(block_net,ratio=SE_ratio)


            # 1x1 pointwise projection to the block's output channel count.
            block_net=slim.convolution2d(block_net,num_outputs=block_output_channels,
                                         kernel_size=1,stride=1)

            # Element-wise residual add, only when stride == 1 and the channel
            # counts match.
            input_channels=inputs.get_shape().as_list()[-1]

            if stride==1 and input_channels==block_output_channels:
                block_net=block_net+inputs
                block_net=tf.identity(block_net,name="output")

        return block_net
Beispiel #11
0
    def _depthwise_separable_conv(inputs,
                                  num_pwc_filters,
                                  width_multiplier,
                                  sc,
                                  downsample=False,
                                  freeze_convs=False):
        """Depthwise-separable conv; freeze_convs=True makes every layer
        (convs and batch norms) non-trainable."""
        filters = round(num_pwc_filters * width_multiplier)
        stride = 2 if downsample else 1
        trainable = not freeze_convs

        # Depthwise stage only (num_outputs=None skips slim's pointwise step).
        net = slim.separable_convolution2d(inputs,
                                           num_outputs=None,
                                           stride=stride,
                                           depth_multiplier=1,
                                           kernel_size=[3, 3],
                                           scope=sc + '/depthwise_conv',
                                           trainable=trainable)
        net = slim.batch_norm(net,
                              scope=sc + '/dw_batch_norm',
                              trainable=trainable)

        # Explicit 1x1 pointwise projection.
        net = slim.convolution2d(net,
                                 filters,
                                 kernel_size=[1, 1],
                                 scope=sc + '/pointwise_conv',
                                 trainable=trainable)
        return slim.batch_norm(net,
                               scope=sc + '/pw_batch_norm',
                               trainable=trainable)
Beispiel #12
0
 def dw_separable(self,
                  input,
                  nr_filters,
                  width_multiplier,
                  depth_multiplier,
                  sc,
                  downsample=False):
     """Depthwise conv followed by a 1x1 pointwise conv (batch norm disabled)."""
     filters = round(nr_filters * width_multiplier)
     stride = 2 if downsample else 1

     # Depthwise stage only; the pointwise projection follows explicitly.
     net = slim.separable_convolution2d(
         input,
         num_outputs=None,
         stride=stride,
         depth_multiplier=depth_multiplier,
         kernel_size=[3, 3],
         scope=sc + '/depthwise_conv')

     # 1x1 pointwise conv to the width-scaled filter count.
     net = slim.convolution2d(net,
                              filters,
                              kernel_size=[1, 1],
                              scope=sc + '/pointwise_conv')
     return net
Beispiel #13
0
def depthwise_separable_conv(inputs,
                             num_pwc_filters,
                             width_multiplier,
                             sc,
                             downsample=False):
    """Depthwise conv + BN, then an explicit 1x1 pointwise conv + BN."""
    filters = round(num_pwc_filters * width_multiplier)
    stride = 2 if downsample else 1

    # num_outputs=None restricts slim to the depthwise stage.
    net = slim.separable_convolution2d(inputs,
                                       num_outputs=None,
                                       stride=stride,
                                       depth_multiplier=1,
                                       kernel_size=[3, 3],
                                       scope=sc + '/depthwise_conv')
    net = slim.batch_norm(net, scope=sc + '/dw_batch_norm')

    # Pointwise projection to the width-scaled filter count.
    net = slim.convolution2d(net,
                             filters,
                             kernel_size=[1, 1],
                             scope=sc + '/pointwise_conv')
    return slim.batch_norm(net, scope=sc + '/pw_batch_norm')
Beispiel #14
0
    def bootleneck(self,inputs,bottleneck_channel_upsample_rate,bottleneck_output_channels,stride,name):
        """MobileNetV2 inverted-residual bottleneck ("bootleneck" is presumably
        a typo for bottleneck; name kept for caller compatibility).

        Expands channels with a 1x1 conv, applies a depthwise conv, projects
        back down with a linear (activation-free) 1x1 conv, and adds a
        residual connection when input/output channel counts match.
        """
        with tf.variable_scope(name):
            input_channels=inputs.get_shape().as_list()[-1]

            # MobileNetV2 core idea #1: the inverted residual. ResNet
            # bottlenecks reduce then restore the channel count; MobileNetV2
            # expands first and projects down afterwards. The paper uses
            # bottleneck_channel_upsample_rate = 6.
            bottleneck=slim.convolution2d(inputs,int(input_channels*bottleneck_channel_upsample_rate),
                                          kernel_size=[1,1],stride=1,scope="pw_conv1")
            bottleneck=slim.batch_norm(bottleneck,scope="bn1")

            # num_outputs=None skips slim's built-in pointwise step; the
            # pointwise projection is done explicitly with the 1x1 conv below.
            bottleneck = slim.separable_convolution2d(bottleneck,num_outputs=None,stride=stride,
                                                      depth_multiplier=1,kernel_size=[3,3],scope="dw_conv")
            bottleneck = slim.batch_norm(bottleneck, scope="bn2")


            bottleneck = slim.convolution2d(bottleneck, bottleneck_output_channels,activation_fn=None,
                                            kernel_size=[1, 1], stride=1, scope="pw_conv2")

            # MobileNetV2 core idea #2: the linear bottleneck -- no activation
            # after the second pointwise conv, because non-linearities lose
            # information in low-dimensional spaces.
            bottleneck = slim.batch_norm(bottleneck, scope="bn3",activation_fn=None)

            # Residual add when channel counts match.
            # NOTE(review): stride is not checked here -- with stride != 1 and
            # equal channels the spatial shapes differ and tf.add would fail;
            # confirm callers never hit that combination.
            bottle_channels=bottleneck.get_shape().as_list()[-1]
            if bottle_channels==input_channels:
                bottleneck_output=tf.add(bottleneck,inputs)
            else:
                bottleneck_output=bottleneck
            return bottleneck_output
def init_resblock(x,
                  out_channel,
                  stride,
                  scope='init_resblock',
                  is_training=True):
    """Initial block: depthwise 3x3 conv (BN + relu6) followed by a 1x1
    pointwise conv (BN), both bias-free.

    Args:
        x: 4-D input tensor.
        out_channel: output channel count of the pointwise conv.
        stride: stride of the depthwise conv.
        scope: variable-scope name.
        is_training: batch-norm training flag.

    Returns:
        Output tensor of the pointwise conv.
    """
    with tf.variable_scope(scope):

        # Depthwise stage: BN + relu6 via arg_scope; bias disabled because
        # batch norm supplies the shift.
        with slim.arg_scope([slim.separable_convolution2d],
                            normalizer_fn=slim.batch_norm,
                            activation_fn=tf.nn.relu6):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training,
                                center=True,
                                scale=True):
                x = slim.separable_convolution2d(x,
                                                 None, [3, 3],
                                                 depth_multiplier=1,
                                                 stride=stride,
                                                 biases_initializer=None,
                                                 biases_regularizer=None)

        # Pointwise 1x1 stage with batch norm.
        # NOTE(review): padding='same' is lowercase -- TF padding constants
        # are conventionally 'SAME'; confirm this value is accepted here.
        with slim.arg_scope([slim.convolution2d],
                            normalizer_fn=slim.batch_norm):
            with slim.arg_scope([slim.batch_norm],
                                is_training=is_training,
                                center=True,
                                scale=True):
                x = slim.convolution2d(x,
                                       out_channel, [1, 1],
                                       stride=1,
                                       padding='same',
                                       biases_initializer=None,
                                       biases_regularizer=None)

                return x
Beispiel #16
0
    def separable_conv(self, input, k_h, k_w, c_o, stride, name, relu=True):
        """Depthwise conv (linear, bias-free) followed by a 1x1 pointwise conv
        with fused batch norm and optional ReLU.

        Args:
            input: 4-D input tensor.
            k_h, k_w: depthwise kernel height and width.
            c_o: output channel count of the pointwise conv.
            stride: depthwise stride.
            name: scope prefix for both layers.
            relu: apply ReLU after the pointwise conv.
        """
        with slim.arg_scope([slim.batch_norm], fused=True):
            # Depthwise stage only (num_outputs=None); no activation, no bias.
            output = slim.separable_convolution2d(
                input,
                num_outputs=None,
                stride=stride,
                trainable=self.trainable,
                depth_multiplier=1.0,
                kernel_size=[k_h, k_w],
                activation_fn=None,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                # weights_initializer=tf.truncated_normal_initializer(stddev=0.09),
                biases_initializer=None,
                padding=DEFAULT_PADDING,
                scope=name + '_depthwise')

            # Explicit 1x1 pointwise projection, batch-normalized.
            output = slim.convolution2d(
                output,
                c_o,
                stride=1,
                kernel_size=[1, 1],
                activation_fn=tf.nn.relu if relu else None,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                # weights_initializer=tf.truncated_normal_initializer(stddev=0.09),
                biases_initializer=slim.init_ops.zeros_initializer(),
                normalizer_fn=slim.batch_norm,
                trainable=self.trainable,
                # weights_regularizer=tf.contrib.layers.l2_regularizer(0.00004),
                weights_regularizer=None,
                scope=name + '_pointwise')

        return output
Beispiel #17
0
    def _depthwise_separable_conv(inputs,
                                  num_pwc_filters,
                                  width_multiplier,
                                  sc,
                                  downsample=False):
        """Build the depthwise-separable conv layer: dw conv + BN, 1x1 conv + BN."""
        filters = round(num_pwc_filters * width_multiplier)
        stride = 2 if downsample else 1

        # num_outputs=None keeps only the depthwise stage.
        net = slim.separable_convolution2d(inputs,
                                           num_outputs=None,
                                           stride=stride,
                                           depth_multiplier=1,
                                           kernel_size=[3, 3],
                                           scope=sc + '/depthwise_conv')
        net = slim.batch_norm(net, scope=sc + '/dw_batch_norm')

        # Explicit 1x1 pointwise projection.
        net = slim.convolution2d(net,
                                 filters,
                                 kernel_size=[1, 1],
                                 scope=sc + '/pointwise_conv')
        return slim.batch_norm(net, scope=sc + '/pw_batch_norm')
Beispiel #18
0
    def MBConv(self, inputs, output_channels, depth_ratio, kernel_size,
               is_downsample, name):
        """Inverted-residual MBConv block: 1x1 expand -> depthwise -> 1x1 project,
        with a residual shortcut when shape is preserved."""
        stride = 2 if is_downsample else 1
        with tf.variable_scope(name):
            in_channels = inputs.get_shape().as_list()[-1]
            expanded_channels = int(depth_ratio * in_channels)

            # 1x1 expansion conv.
            net = slim.convolution2d(inputs,
                                     num_outputs=expanded_channels,
                                     kernel_size=1,
                                     stride=1,
                                     scope="1x1_conv1")
            # Depthwise conv (num_outputs=None skips slim's pointwise step).
            net = slim.separable_convolution2d(net,
                                               num_outputs=None,
                                               kernel_size=kernel_size,
                                               stride=stride,
                                               scope="DWConv")
            # 1x1 projection down to output_channels.
            net = slim.convolution2d(net,
                                     num_outputs=output_channels,
                                     kernel_size=1,
                                     stride=1,
                                     scope="1x1_conv2")

            # Residual shortcut only when spatial size and channels match.
            if not is_downsample and in_channels == output_channels:
                net = net + inputs
                net = tf.identity(net, name="out")
            return net
Beispiel #19
0
def separable_conv(input, c_o, k_s, stride, scope):
    """Depthwise conv followed by a 1x1 pointwise conv, with batch norm
    (relu6-activated via the arg_scope) on the pointwise output.

    Args:
        input: 4-D input tensor.
        c_o: output channel count of the pointwise conv.
        k_s: square depthwise kernel size.
        stride: depthwise stride.
        scope: scope prefix for both layers.
    """
    with slim.arg_scope([slim.batch_norm],
                        decay=0.999,
                        fused=True,
                        is_training=_trainable,
                        activation_fn=tf.nn.relu6):
        # Depthwise stage only (num_outputs=None); bias disabled.
        output = slim.separable_convolution2d(
            input,
            num_outputs=None,
            stride=stride,
            trainable=_trainable,
            depth_multiplier=1.0,
            kernel_size=[k_s, k_s],
            weights_initializer=_init_xavier,
            weights_regularizer=_l2_regularizer_00004,
            biases_initializer=None,
            scope=scope + '_depthwise')

        # Explicit 1x1 pointwise projection, batch-normalized.
        output = slim.convolution2d(output,
                                    c_o,
                                    stride=1,
                                    kernel_size=[1, 1],
                                    weights_initializer=_init_xavier,
                                    biases_initializer=_init_zero,
                                    normalizer_fn=slim.batch_norm,
                                    trainable=_trainable,
                                    weights_regularizer=None,
                                    scope=scope + '_pointwise')

    return output
Beispiel #20
0
    def separable_conv(self, input, k_h, k_w, c_o, stride, name, relu=True, set_bias=True):
        """Depthwise conv (linear) followed by a 1x1 pointwise conv with batch norm.

        Args:
            input: 4-D input tensor.
            k_h, k_w: depthwise kernel height and width.
            c_o: output channel count of the pointwise conv.
            stride: depthwise stride.
            name: scope prefix for both layers.
            relu: apply the activation after the pointwise conv.
            set_bias: give the pointwise conv a zero-initialized bias.
        """
        with slim.arg_scope([slim.batch_norm], decay=0.999, fused=common.batchnorm_fused, is_training=self.trainable):
            # Depthwise stage only (num_outputs=None); no activation, no bias.
            output = slim.separable_convolution2d(input,
                                                  num_outputs=None,
                                                  stride=stride,
                                                  trainable=self.trainable,
                                                  depth_multiplier=1.0,
                                                  kernel_size=[k_h, k_w],
                                                  # activation_fn=common.activation_fn if relu else None,
                                                  activation_fn=None,
                                                  # normalizer_fn=slim.batch_norm,
                                                  weights_initializer=_init_xavier,
                                                  # weights_initializer=_init_norm,
                                                  weights_regularizer=_l2_regularizer_00004,
                                                  biases_initializer=None,
                                                  padding=DEFAULT_PADDING,
                                                  scope=name + '_depthwise')

            # Explicit 1x1 pointwise projection, batch-normalized.
            output = slim.convolution2d(output,
                                        c_o,
                                        stride=1,
                                        kernel_size=[1, 1],
                                        activation_fn=common.activation_fn if relu else None,
                                        weights_initializer=_init_xavier,
                                        # weights_initializer=_init_norm,
                                        biases_initializer=_init_zero if set_bias else None,
                                        normalizer_fn=slim.batch_norm,
                                        trainable=self.trainable,
                                        weights_regularizer=None,
                                        scope=name + '_pointwise')

        return output
Beispiel #21
0
 def shuffle_unit(self,
                  input,
                  name,
                  nr_groups=3,
                  stride=1,
                  first_block=False):
     """ShuffleNet unit: group conv -> channel shuffle -> depthwise conv ->
     group conv, with an avg-pool/concat shortcut when stride >= 2 and an
     additive shortcut otherwise.
     """
     in_channels = input.shape.as_list()[3]
     # Group conv 1: the very first block starts from ungrouped input.
     if first_block:
         layer = self.group_conv('g1' + name, input, in_channels, 1)
     else:
         layer = self.group_conv('g2' + name, input, in_channels, nr_groups)
     # No batch norm as of now
     layer = tf.nn.relu(layer)
     layer = self.shuffle(layer, nr_groups)
     # Depthwise conv
     layer = slim.separable_convolution2d(layer,
                                          num_outputs=None,
                                          stride=stride,
                                          depth_multiplier=1,
                                          kernel_size=[3, 3])
     # Group conv 2.
     # Fixed: previously fed `input` here, discarding the group-conv/shuffle/
     # depthwise results above and mismatching the pooled shortcut's spatial
     # size whenever stride >= 2.
     layer = self.group_conv('g3' + name, layer, in_channels, nr_groups)
     if stride >= 2:
         # Downsampling path: shortcut is avg-pooled input, concatenated on channels.
         input = tf.nn.avg_pool(input, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')
         layer = tf.concat([layer, input], 3)
     else:
         layer = tf.add(layer, input)
     layer = tf.nn.relu(layer)
     return layer
def _depthwise_separable_conv(inputs,
                              num_pwc_filters,
                              width_multiplier,
                              sc,
                              downsample=False):
    """Depthwise conv + BN, then an explicit 1x1 pointwise conv + BN, with the
    batch-norm variables registered as trainable."""
    filters = round(num_pwc_filters * width_multiplier)
    stride = 2 if downsample else 1

    # num_outputs=None restricts slim to the depthwise stage.
    net = slim.separable_convolution2d(inputs,
                                       num_outputs=None,
                                       stride=stride,
                                       depth_multiplier=1,
                                       kernel_size=[3, 3],
                                       scope=sc + '/depthwise_conv')

    # updates_collections=None applies moving-average updates in place;
    # variables_collections exposes the BN variables as trainable.
    net = slim.batch_norm(
        net,
        updates_collections=None,
        variables_collections=[tf.GraphKeys.TRAINABLE_VARIABLES],
        scope=sc + '/dw_batch_norm')

    # Explicit 1x1 pointwise projection.
    net = slim.convolution2d(net,
                             filters,
                             kernel_size=[1, 1],
                             scope=sc + '/pointwise_conv')
    return slim.batch_norm(
        net,
        updates_collections=None,
        variables_collections=[tf.GraphKeys.TRAINABLE_VARIABLES],
        scope=sc + '/pw_batch_norm')
Beispiel #23
0
def depthwise_separable_conv(inputs,
                             num_pwc_filters,
                             width_multiplier,
                             sc,
                             downsample=False,
                             batch_norm=True):
    """Depthwise-separable conv: depthwise 3x3 -> ReLU [-> BN] -> pointwise 1x1 -> ReLU [-> BN].

    Args:
        inputs: input feature map tensor.
        num_pwc_filters: base number of pointwise output filters.
        width_multiplier: scale factor applied to num_pwc_filters.
        sc: scope prefix for the created layers.
        downsample: when True, the depthwise conv uses stride 2.
        batch_norm: when True, batch norm is applied after each ReLU.

    Returns:
        Output tensor of the pointwise stage.
    """
    filters = round(num_pwc_filters * width_multiplier)
    stride = 2 if downsample else 1

    # num_outputs=None makes slim perform only the depthwise stage.
    net = slim.separable_convolution2d(
        inputs,
        num_outputs=None,
        stride=stride,
        depth_multiplier=1,
        kernel_size=[3, 3],
        scope=sc + '/depthwise_conv',
        weights_initializer=weight_init)
    net = tf.nn.relu(net)
    if batch_norm:
        net = tf.contrib.layers.batch_norm(net)

    net = slim.convolution2d(net,
                             filters,
                             kernel_size=[1, 1],
                             scope=sc + '/pointwise_conv',
                             weights_initializer=weight_init)
    net = tf.nn.relu(net)
    if batch_norm:
        net = tf.contrib.layers.batch_norm(net)
    return net
def dw_conv2d(x_tensor, conv_ksize, stride, name):
    """Depthwise-only convolution (no pointwise stage) over x_tensor.

    num_outputs=None instructs slim to skip the pointwise 1x1 stage.
    """
    return slim.separable_convolution2d(x_tensor,
                                        num_outputs=None,
                                        stride=stride,
                                        depth_multiplier=1,
                                        kernel_size=conv_ksize,
                                        scope=name)
Beispiel #25
0
    def separable_conv(self,
                       input,
                       k_h,
                       k_w,
                       c_o,
                       stride,
                       name,
                       relu=True,
                       set_bias=True):
        """Depthwise-separable convolution with batch norm on both stages.

        :param input: input tensor
        :param k_h: kernel height
        :param k_w: kernel width
        :param c_o: number of output channels
        :param stride: stride of the depthwise stage
        :param name: base op name
        :param relu: whether to apply the activation on the pointwise stage
        :param set_bias: whether the pointwise stage adds a bias term
        :return: output tensor of the pointwise stage
        """
        with slim.arg_scope([slim.batch_norm],
                            decay=0.999,
                            fused=common.batchnorm_fused,
                            is_training=self.trainable):
            # Depthwise stage: linear (no activation), batch-normalized, no bias.
            depthwise = slim.separable_convolution2d(
                input,
                num_outputs=None,
                stride=stride,
                trainable=self.trainable,
                depth_multiplier=1.0,
                kernel_size=[k_h, k_w],
                activation_fn=None,
                normalizer_fn=slim.batch_norm,
                weights_initializer=_init_xavier,
                weights_regularizer=_l2_regularizer_00004,
                biases_initializer=None,
                padding=DEFAULT_PADDING,
                scope=name + '_depthwise')

            # Pointwise 1x1 stage: optional activation and bias.
            output = slim.convolution2d(
                depthwise,
                c_o,
                stride=1,
                kernel_size=[1, 1],
                activation_fn=common.activation_fn if relu else None,
                weights_initializer=_init_xavier,
                biases_initializer=_init_zero if set_bias else None,
                normalizer_fn=slim.batch_norm,
                trainable=self.trainable,
                weights_regularizer=None,
                scope=name + '_pointwise')

        # Log per-channel maps of the first example as TensorBoard images.
        feature = tf.transpose(output, [3, 1, 2, 0])[..., 0:1]
        tf.summary.image(name, feature, max_outputs=32)
        return output
def inverte_resblock(x,
                     in_channel,
                     out_channel,
                     stride,
                     expand_radio=6,
                     res_connect=True,
                     scope='inverte_resblock',
                     is_training=True):
    """MobileNetV2-style inverted residual block.

    Expand 1x1 (ReLU6) -> depthwise 3x3 (ReLU6) -> linear project 1x1, each
    with batch norm and no bias. When res_connect is True the input is added
    back (caller must ensure stride == 1 and in_channel == out_channel —
    TODO confirm; the function itself does not check).
    """
    expanded = in_channel * expand_radio
    with tf.variable_scope(scope):
        shortcut = x if res_connect else None

        # 1x1 expansion: BN + ReLU6.
        with slim.arg_scope([slim.convolution2d],
                            normalizer_fn=slim.batch_norm,
                            activation_fn=tf.nn.relu6), \
             slim.arg_scope([slim.batch_norm],
                            is_training=is_training,
                            center=True,
                            scale=True):
            x = slim.convolution2d(x,
                                   expanded, [1, 1],
                                   stride=1,
                                   padding='same',
                                   biases_initializer=None,
                                   biases_regularizer=None)

        # 3x3 depthwise: BN + ReLU6.
        with slim.arg_scope([slim.separable_convolution2d],
                            normalizer_fn=slim.batch_norm,
                            activation_fn=tf.nn.relu6), \
             slim.arg_scope([slim.batch_norm],
                            is_training=is_training,
                            center=True,
                            scale=True):
            x = slim.separable_convolution2d(x,
                                             None, [3, 3],
                                             depth_multiplier=1,
                                             stride=stride,
                                             biases_initializer=None,
                                             biases_regularizer=None)

        # 1x1 linear projection: BN, no activation.
        with slim.arg_scope([slim.convolution2d],
                            normalizer_fn=slim.batch_norm), \
             slim.arg_scope([slim.batch_norm],
                            is_training=is_training,
                            center=True,
                            scale=True):
            x = slim.convolution2d(x,
                                   out_channel, [1, 1],
                                   stride=1,
                                   padding='same',
                                   biases_initializer=None,
                                   biases_regularizer=None)

        return x + shortcut if res_connect else x
def SepConv(x, kernel_size, stride, name, is_training, labels):
    """Two stacked channel-preserving separable convs, each followed by NormalizeG.

    Only the first conv applies the stride; the second uses the default
    stride of 1. A ReLU is applied to the input before the first conv.
    """
    with tf.variable_scope(name):
        x = tf.nn.relu(x)
        channels = x.get_shape()[-1].value
        x = slim.separable_convolution2d(x,
                                         channels,
                                         kernel_size,
                                         depth_multiplier=1,
                                         stride=stride)
        x = NormalizeG(name + 'n1', x, is_training, labels)
        x = slim.separable_convolution2d(x,
                                         channels,
                                         kernel_size,
                                         depth_multiplier=1)
        x = NormalizeG(name + 'n2', x, is_training, labels)
        return x
Beispiel #28
0
def DilConv(x, C_out, kernel_size, stride, rate):
    """Dilated separable convolution: ReLU -> separable conv -> batch norm.

    Bug fix: `rate` was accepted but never used, so the convolution was
    never actually dilated; it is now forwarded as the atrous/dilation
    rate of the depthwise stage. Note slim requires stride == 1 whenever
    rate > 1.

    :param x: input tensor
    :param C_out: number of output channels
    :param kernel_size: depthwise kernel size
    :param stride: convolution stride
    :param rate: atrous (dilation) rate
    :return: batch-normalized output tensor
    """
    x = tflearn.relu(x)
    x = slim.separable_convolution2d(x,
                                     C_out,
                                     kernel_size,
                                     depth_multiplier=1,
                                     stride=stride,
                                     rate=rate)
    x = slim.batch_norm(x)
    return x
Beispiel #29
0
 def dwscb(self, inputs, num_filters, kernel=[3, 3]):
     """Depthwise conv -> fused BN -> ReLU -> Convblock(num_filters)."""
     # Depthwise-only stage (num_outputs=None), linear output so BN+ReLU
     # can be applied explicitly afterwards.
     dw = slim.separable_convolution2d(inputs,
                                       num_outputs=None,
                                       depth_multiplier=1,
                                       kernel_size=kernel,
                                       activation_fn=None)
     dw = slim.batch_norm(dw, fused=True)
     dw = tf.nn.relu(dw)
     return self.Convblock(dw, num_filters)
 def depthwise_conv_bn(self, name, inputs, stride=1):
     """Depthwise 3x3 conv followed by a linear batch norm, under scope `name`."""
     with tf.variable_scope(name):
         # num_outputs=None: depthwise stage only, no pointwise conv.
         out = slim.separable_convolution2d(inputs,
                                            None,
                                            kernel_size=3,
                                            depth_multiplier=1,
                                            stride=stride)
         out = slim.batch_norm(out, activation_fn=None)
     return out
Beispiel #31
0
    def res_DW_block(self, inputs, output_channels, stride=1):
        """Depthwise branch concatenated with a residual conv branch.

        Bug fix: `output_channels/2` is float division under Python 3,
        yielding a float filter count that slim's num_outputs rejects;
        integer division is used instead. The channels split half/half,
        with the residual branch taking the ceil half when
        output_channels is odd (matching the original Python 2 intent).

        :param inputs: input feature map tensor
        :param output_channels: total channels of the concatenated output
        :param stride: stride applied to both branches
        :return: concatenation of the two branches along the channel axis
        """
        half = output_channels // 2
        res_channels = output_channels - half

        # Main branch: depthwise 3x3 -> BN -> conv 3x3 -> BN.
        block_net = slim.separable_convolution2d(inputs, None, [3, 3], stride=stride)
        block_net = slim.batch_norm(block_net)
        block_net = slim.convolution2d(block_net, half, [3, 3])
        block_net = slim.batch_norm(block_net)

        # Residual branch: plain 3x3 conv with matching stride.
        res_net = slim.convolution2d(inputs, res_channels, [3, 3], stride=stride)
        return tf.concat([block_net, res_net], axis=3)
Beispiel #32
0
def dw_conv(inputs, s, name):
    """Depthwise 3x3 conv (stride s) with batch norm, scoped as name + '_ds_conv'."""
    # num_outputs=None: slim performs only the depthwise stage.
    net = slim.separable_convolution2d(inputs,
                                       num_outputs=None,
                                       stride=s,
                                       depth_multiplier=1,
                                       kernel_size=[3, 3],
                                       normalizer_fn=slim.batch_norm,
                                       scope=name + '_ds_conv')
    if PRINT_LAYER_LOG:
        print(name, net.get_shape())
    return net
Beispiel #33
0
  def _depthwise_separable_conv(inputs,
                                num_pwc_filters,
                                sc,
                                kernel_size,
                                stride):
    """Depthwise conv -> BN -> pointwise 1x1 conv -> BN.

    Helper that builds one depth-wise separable convolution layer under
    the scope prefix `sc`.
    """
    # num_outputs=None makes slim perform only the depthwise stage, so a
    # batch norm can sit between the depthwise and pointwise convolutions.
    net = slim.separable_convolution2d(inputs,
                                       num_outputs=None,
                                       stride=stride,
                                       depth_multiplier=1,
                                       kernel_size=kernel_size,
                                       scope=sc + '/depthwise_conv')
    net = slim.batch_norm(net, scope=sc + '/dw_batch_norm')
    net = slim.convolution2d(net,
                             num_pwc_filters,
                             kernel_size=[1, 1],
                             scope=sc + '/pointwise_conv')
    return slim.batch_norm(net, scope=sc + '/pw_batch_norm')