Example #1
def get_block(x_in, ch_in, ch_out, regularizer):
    x = layers.Conv2D(ch_in,
                      kernel_size=(1, 1),
                      padding='same',
                      use_bias=False,
                      kernel_regularizer=regularizer)(x_in)
    x = get_post(x)

    x = layers.DepthwiseConv2D(kernel_size=(1, 3),
                               padding='same',
                               use_bias=False,
                               depthwise_regularizer=regularizer)(x)
    x = get_post(x)
    x = layers.MaxPool2D(pool_size=(2, 1),
                         strides=(2, 1))(x)  # Separable pooling

    x = layers.DepthwiseConv2D(kernel_size=(3, 1),
                               padding='same',
                               use_bias=False,
                               depthwise_regularizer=regularizer)(x)
    x = get_post(x)

    x = layers.Conv2D(ch_out,
                      kernel_size=(2, 1),
                      strides=(1, 2),
                      padding='same',
                      use_bias=False,
                      kernel_regularizer=regularizer)(x)
    x = get_post(x)

    return x
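`get_post` is defined elsewhere in that project; a plausible stand-in, assuming the usual BatchNorm-plus-activation post-processing (the ReLU choice is an assumption):

def get_post(x_in):
    # Assumed post-block: BatchNorm followed by ReLU.
    x = layers.BatchNormalization()(x_in)
    return layers.Activation('relu')(x)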
Example #2
def ExitFlow(layer_input):

    layer_skip = layers.Conv2D(filters=512,
                               kernel_size=1,
                               strides=2,
                               padding="same",
                               activation="relu",
                               kernel_initializer="he_normal")(layer_input)

    layer1_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer_input)
    layer1_BN = layers.BatchNormalization()(layer1_depth)
    layer1_separa = layers.SeparableConv2D(
        filters=256,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer1_BN)

    layer2_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer1_separa)
    layer2_BN = layers.BatchNormalization()(layer2_depth)
    layer2_separa = layers.SeparableConv2D(
        filters=512,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer2_BN)

    layer3_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer2_separa)
    layer3_BN = layers.BatchNormalization()(layer3_depth)
    layer3_separa = layers.SeparableConv2D(
        filters=512,
        kernel_size=1,
        strides=2,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer3_BN)

    layer_process = layers.add([layer3_separa, layer_skip])

    layer1 = MiddleFlow_unit(layer_process, 1024)
    layer2 = MiddleFlow_unit(layer1, 1024)
    layer_out = MiddleFlow_unit(layer2, 2048)

    return layer_out
Example #3
def EntryFlow(layer_input, layer_skip, filters):

    layer_skip = layers.Conv2D(filters,
                               kernel_size=1,
                               strides=2,
                               padding="same",
                               activation="relu",
                               kernel_initializer="he_normal")(layer_input)
    # one separable_depthwise
    layer1_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer_input)
    layer1_BN = layers.BatchNormalization()(layer1_depth)
    layer1_separa = layers.SeparableConv2D(
        filters,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer1_BN)

    layer2_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer1_separa)
    layer2_BN = layers.BatchNormalization()(layer2_depth)
    layer2_separa = layers.SeparableConv2D(
        filters,
        kernel_size=1,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer2_BN)

    layer3_depth = layers.DepthwiseConv2D(
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer2_separa)
    layer3_BN = layers.BatchNormalization()(layer3_depth)
    layer3_separa = layers.SeparableConv2D(
        filters,
        kernel_size=3,
        strides=2,
        padding="same",
        activation="relu",
        kernel_initializer="he_normal")(layer3_BN)

    print(layer3_separa.shape, layer_skip.shape)
    block_out = layers.add([layer_skip, layer3_separa])

    return block_out, layer_skip
Example #4
def _depthwise_conv_block(inputs,
                          pointwise_conv_filters,
                          alpha,
                          depth_multiplier=1,
                          strides=(1, 1),
                          block_id=1):
    channel_axis = 1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    x = layers.ZeroPadding2D((1, 1), name='conv_pad_%d' % block_id)(inputs)
    # if strides == (1, 1):
    #    x = inputs
    # else:
    #    x = layers.ZeroPadding2D(((0, 1), (0, 1)),
    #                             name='conv_pad_%d' % block_id)(inputs)
    x = layers.DepthwiseConv2D(
        (3, 3),
        padding='valid',
        # padding='same' if strides == (1, 1) else 'valid',
        depth_multiplier=depth_multiplier,
        strides=strides,
        use_bias=False,
        name='conv_dw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='conv_dw_%d_bn' % block_id)(x)
    x = layers.Activation('relu', name='conv_dw_%d_relu' % block_id)(x)
    x = layers.Conv2D(pointwise_conv_filters, (1, 1),
                      padding='same',
                      use_bias=False,
                      strides=(1, 1),
                      name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='conv_pw_%d_bn' % block_id)(x)
    return layers.Activation('relu', name='conv_pw_%d_relu' % block_id)(x)
Example #5
def bottleneck(block_id, inputs, output_filters, expansion_factor):
    # Inverted residual block
    bn_name = bn_name_template.format(block_id) + '_'
    input_shape = KK.eval(KK.shape(inputs)[-1])
    expanded_filters = output_filters * expansion_factor
    hidden = layers.Conv2D(filters=expanded_filters,
                           kernel_size=(1, 1),
                           strides=1,
                           padding='same',
                           name=bn_name + "conv0")(inputs)
    hidden = layers.BatchNormalization(name=bn_name + 'bn0')(hidden)
    hidden = layers.ReLU(6., name=bn_name + 'relu0')(hidden)
    hidden = layers.DepthwiseConv2D(kernel_size=(3, 3),
                                    strides=1,
                                    padding='same',
                                    name=bn_name + 'depth_conv0')(hidden)
    hidden = layers.BatchNormalization(name=bn_name + 'bn1')(hidden)
    hidden = layers.ReLU(6., name=bn_name + 'relu1')(hidden)
    hidden = layers.Conv2D(filters=output_filters,
                           kernel_size=(1, 1),
                           strides=1,
                           padding='same',
                           name=bn_name + 'conv1')(hidden)
    hidden = layers.BatchNormalization(name=bn_name + 'bn2')(hidden)

    if input_shape == output_filters:
        print("************** Residual connection made. **************")
        hidden = layers.Add(name=bn_name + 'add0')([inputs, hidden])
    else:
        print("!!!!!!!!!!!!!! No residual connection made. !!!!!!!!!!!!!!")
    print("input_shape: {}\texpanded_filters: {}".format(
        input_shape, expanded_filters))

    return hidden
Example #6
def rpn(base_layers, num_anchors):

    x = layers.DepthwiseConv2D((5, 5),
                               activation='relu',
                               padding='same',
                               depth_multiplier=1,
                               strides=1,
                               use_bias=False,
                               name='rpn/conv5x5')(base_layers)
    x = layers.Conv2D(256, (1, 1),
                      activation='relu',
                      padding='same',
                      strides=1,
                      use_bias=True,
                      name='rpn/conv1x1')(x)

    x_class = layers.Conv2D(num_anchors, (1, 1),
                            activation='sigmoid',
                            kernel_initializer='uniform',
                            name='rpn_out_class')(x)
    x_regr = layers.Conv2D(num_anchors * 4, (1, 1),
                           activation='linear',
                           kernel_initializer='zero',
                           name='rpn_out_regress')(x)

    return [x_class, x_regr, base_layers]
Example #7
def LiteConv(x, i, filter_num):
    x = layers.Conv2D(filter_num // 2, (1, 1),
                      padding='same',
                      use_bias=False,
                      name=str(i) + '_pwconv1')(x)
    x = layers.BatchNormalization(momentum=0.99,
                                  name=str(i) + '_pwconv1_bn')(x)
    x = layers.Activation('relu', name=str(i) + '_pwconv1_act')(x)
    x = layers.DepthwiseConv2D(kernel_size=3,
                               strides=2,
                               activation=None,
                               use_bias=False,
                               padding='same',
                               name=str(i) + '_dwconv2')(x)
    x = layers.BatchNormalization(momentum=0.99,
                                  name=str(i) + '_sepconv2_bn')(x)
    x = layers.Activation('relu', name=str(i) + '_sepconv2_act')(x)
    net = layers.Conv2D(filter_num, (1, 1),
                        padding='same',
                        use_bias=False,
                        name=str(i) + '_pwconv3')(x)
    x = layers.BatchNormalization(momentum=0.99,
                                  name=str(i) + '_pwconv3_bn')(net)
    x = layers.Activation('relu', name=str(i) + '_pwconv3_act')(x)
    # print(x.shape)
    return x, net
Example #8
def convolution(weights_dict, name, input, group, conv_type, filters=None, **kwargs):
    if not conv_type.startswith('layer'):
        layer = keras.applications.mobilenet.DepthwiseConv2D(name=name, **kwargs)(input)
        return layer
    elif conv_type == 'layers.DepthwiseConv2D':
        layer = layers.DepthwiseConv2D(name=name, **kwargs)(input)
        return layer
    
    inp_filters = K.int_shape(input)[-1]
    inp_grouped_channels = int(inp_filters / group)
    out_grouped_channels = int(filters / group)
    group_list = []
    if group == 1:
        func = getattr(layers, conv_type.split('.')[-1])
        return func(name=name, filters=filters, **kwargs)(input)
    weight_groups = []
    if weights_dict is not None:
        w = np.array(weights_dict[name]['weights'])
        weight_groups = np.split(w, indices_or_sections=group, axis=-1)
    for c in range(group):
        # Bind c via a default argument so each Lambda slices its own channel group.
        x = layers.Lambda(lambda z, c=c: z[..., c * inp_grouped_channels:(c + 1) * inp_grouped_channels])(input)
        x = layers.Conv2D(name=name + "_" + str(c), filters=out_grouped_channels, **kwargs)(x)
        if weights_dict is not None:
            weights_dict[name + "_" + str(c)] = {'weights': weight_groups[c]}
        group_list.append(x)
    layer = layers.concatenate(group_list, axis=-1)
    if weights_dict is not None and 'bias' in weights_dict[name]:
        b = K.variable(weights_dict[name]['bias'], name=name + "_bias")
        layer = layer + b
    return layer
Example #9
def _depthwise_conv_block_td(inputs,
                             pointwise_conv_filters,
                             alpha,
                             depth_multiplier=1,
                             strides=(1, 1),
                             block_id=1):

    channel_axis = 3
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    if strides == (1, 1):
        x = inputs
    else:
        x = layers.ZeroPadding2D(((0, 1), (0, 1)),
                                 name='conv_pad_%d' % block_id)(inputs)
    x = TimeDistributed(layers.DepthwiseConv2D(
        (3, 3),
        padding='same' if strides == (1, 1) else 'valid',
        depth_multiplier=depth_multiplier,
        strides=strides,
        use_bias=False),
                        name='conv_dw_td_%d' % block_id)(x)
    x = TimeDistributed(layers.BatchNormalization(axis=channel_axis),
                        name='conv_dw_td_%d_bn' % block_id)(x)
    x = layers.ReLU(6., name='conv_dw_td_%d_relu' % block_id)(x)

    x = TimeDistributed(layers.Conv2D(pointwise_conv_filters, (1, 1),
                                      padding='same',
                                      use_bias=False,
                                      strides=(1, 1)),
                        name='conv_pw_td_%d' % block_id)(x)
    x = TimeDistributed(layers.BatchNormalization(axis=channel_axis),
                        name='conv_pw_td_%d_bn' % block_id)(x)
    return layers.ReLU(6., name='conv_pw_td_%d_relu' % block_id)(x)
Example #10
def dw_sub_block(inputs,
                 stride,
                 alpha,
                 filters,
                 stage=1,
                 block_id=1,
                 output2=False):
    in_channels = K.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    name = 'sub_stage{}_block{}'.format(stage, block_id)

    # Depthwise
    if stride == 2:
        x = KL.ZeroPadding2D(padding=correct_pad(K, x, 3),
                             name=name + '_dw_pad')(x)
    x = KL.DepthwiseConv2D(kernel_size=3,
                           strides=stride,
                           activation=None,
                           use_bias=False,
                           padding='same' if stride == 1 else 'valid',
                           name=name + '_dw_conv')(x)
    x = KL.BatchNormalization(epsilon=1e-3,
                              momentum=0.999,
                              name=name + '_dw_bn')(x)

    x = KL.ReLU(6., name=name + '_dw_relu')(x)

    return x
Example #11
def depthwise_block(x, nb_filters, alpha, strides):
    """ Create a Depthwise Separable Convolution block
        inputs    : input tensor
        nb_filters: number of filters
        alpha     : width multiplier
        strides   : strides
    """
    # Apply the width filter to the number of feature maps
    filters = int(nb_filters * alpha)

    # Strided convolution to match number of filters
    if strides == (2, 2):
        x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
        padding = 'valid'
    else:
        padding = 'same'

    # Depthwise Convolution
    x = layers.DepthwiseConv2D((3, 3), strides, padding=padding)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Pointwise Convolution
    x = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    return x
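A minimal usage sketch for the block above (assuming `from tensorflow.keras import layers, Model`):

inputs = layers.Input(shape=(224, 224, 3))
x = layers.Conv2D(32, (3, 3), strides=(2, 2), padding='same')(inputs)
x = depthwise_block(x, 64, alpha=1.0, strides=(1, 1))
x = depthwise_block(x, 128, alpha=1.0, strides=(2, 2))
model = Model(inputs, x)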
Example #12
    def block(inputs):
        x = inputs

        # Expansion phase
        if block_args.expand_ratio != 1:
            expand_conv = layers.Conv2D(
                filters,
                kernel_size=[1, 1],
                strides=[1, 1],
                kernel_initializer=conv_kernel_initializer,
                padding='same',
                use_bias=False,
                name=block_name + 'expansion_conv2d')(x)
            bn0 = layers.BatchNormalization(
                momentum=batch_norm_momentum,
                epsilon=batch_norm_epsilon,
                name=block_name + 'expansion_batch_norm')(expand_conv)

            x = Swish(name=block_name + 'expansion_swish')(bn0)

        # Depth-wise convolution phase
        kernel_size = block_args.kernel_size
        depthwise_conv = layers.DepthwiseConv2D(
            [kernel_size, kernel_size],
            strides=block_args.strides,
            depthwise_initializer=conv_kernel_initializer,
            padding='same',
            use_bias=False,
            name=block_name + 'depthwise_conv2d')(x)
        bn1 = layers.BatchNormalization(momentum=batch_norm_momentum,
                                        epsilon=batch_norm_epsilon,
                                        name=block_name +
                                        'depthwise_batch_norm')(depthwise_conv)
        x = Swish(name=block_name + 'depthwise_swish')(bn1)

        if has_se:
            x = SEBlock(block_args, block_name=block_name)(x)

        # Output phase
        project_conv = layers.Conv2D(
            block_args.output_filters,
            kernel_size=[1, 1],
            strides=[1, 1],
            kernel_initializer=conv_kernel_initializer,
            padding='same',
            name=block_name + 'output_conv2d',
            use_bias=False)(x)
        x = layers.BatchNormalization(momentum=batch_norm_momentum,
                                      epsilon=batch_norm_epsilon,
                                      name=block_name +
                                      'output_batch_norm')(project_conv)
        if block_args.id_skip:
            if all(s == 1 for s in block_args.strides
                   ) and block_args.input_filters == block_args.output_filters:
                # only apply drop_connect if skip presents.
                if drop_connect_rate:
                    x = DropConnect(drop_connect_rate)(x)
                x = layers.add([x, inputs])

        return x
Example #13
def depthwiseConv_bn(x, depth_multiplier, kernel_size, strides=1):
    """ Depthwise convolution
    The DepthwiseConv2D is just the first step of the Depthwise Separable convolution (without the pointwise step).
    Depthwise Separable convolutions consists in performing just the first step in a depthwise spatial convolution
    (which acts on each input channel separately).

    This function defines a 2D Depthwise separable convolution operation with BN and relu6.
    # Arguments
        x: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel_size: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
    # Returns
        Output tensor.
    """

    x = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=strides, depth_multiplier=depth_multiplier,
                               padding='same', use_bias=False, kernel_regularizer=regularizers.l2(l=0.0003))(x)
    x = layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = layers.ReLU(max_value=6)(x)
    return x
Example #14
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
                          depth_multiplier=1, strides=(1, 1), block_id=1):    
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)

    if strides == (1, 1):
        x = inputs
    else:
        x = layers.ZeroPadding2D(((0, 1), (0, 1)),
                                 name='conv_pad_%d' % block_id)(inputs)
    x = layers.DepthwiseConv2D((3, 3),
                               padding='same' if strides == (1, 1) else 'valid',
                               depth_multiplier=depth_multiplier,
                               strides=strides,
                               use_bias=False,
                               name='conv_dw_%d' % block_id)(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
    x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)

    x = layers.Conv2D(pointwise_conv_filters, (1, 1),
                      padding='same',
                      use_bias=False,
                      strides=(1, 1),
                      name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='conv_pw_%d_bn' % block_id)(x)
    return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
Example #15
    def DepthwiseConv2DTranspose(self,
                                 x,
                                 kernel_size,
                                 name=None,
                                 pad=1,
                                 strides=1,
                                 use_bias=False):
        # Keras and TensorFlow do not support grouped (depthwise) transposed
        # convolutions, so upsample by interleaving zeros, then apply a plain
        # DepthwiseConv2D.
        up_x = KL.Lambda(lambda x: tf.reshape(
            tf.transpose(
                tf.reshape(
                    tf.concat([
                        x,
                        tf.tile(tf.zeros_like(x),
                                [1, 1, 1, strides * strides - 1])
                    ], axis=-1),
                    [
                        tf.shape(x)[0],
                        tf.shape(x)[1],
                        tf.shape(x)[2], strides, strides, x.shape[3]
                    ]), [0, 1, 3, 2, 4, 5]),
            [
                tf.shape(x)[0],
                tf.shape(x)[1] * strides,
                tf.shape(x)[2] * strides, x.shape[3]
            ]))(x)
        up_x = KL.ZeroPadding2D(
            ((pad, pad - (strides - 1)), (pad, pad - (strides - 1))))(up_x)
        return KL.DepthwiseConv2D((kernel_size, kernel_size),
                                  name=name,
                                  use_bias=use_bias)(up_x)
Example #16
    def block(inputs):

        if expand_ratio != 1:
            x = layers.Conv2D(
                filters,
                kernel_size=[1, 1],
                strides=[1, 1],
                kernel_initializer=EfficientNetConvInitializer(),
                padding='same',
                use_bias=False)(inputs)
            x = layers.BatchNormalization(
                axis=channel_axis,
                momentum=batch_norm_momentum,
                epsilon=batch_norm_epsilon)(x)
            x = Swish()(x)
        else:
            x = inputs

        x = layers.DepthwiseConv2D(
            [kernel_size, kernel_size],
            strides=strides,
            depthwise_initializer=EfficientNetConvInitializer(),
            padding='same',
            use_bias=False)(x)
        x = layers.BatchNormalization(
            axis=channel_axis,
            momentum=batch_norm_momentum,
            epsilon=batch_norm_epsilon)(x)
        x = Swish()(x)

        if has_se:
            x = SEBlock(input_filters, se_ratio, expand_ratio,
                        data_format)(x)

        # output phase

        x = layers.Conv2D(
            output_filters,
            kernel_size=[1, 1],
            strides=[1, 1],
            kernel_initializer=EfficientNetConvInitializer(),
            padding='same',
            use_bias=False)(x)
        x = layers.BatchNormalization(
            axis=channel_axis,
            momentum=batch_norm_momentum,
            epsilon=batch_norm_epsilon)(x)

        if id_skip:
            if all(s == 1 for s in strides) and (
                    input_filters == output_filters):

                # only apply drop_connect if skip presents.
                if drop_connect_rate:
                    x = DropConnect(drop_connect_rate)(x)

                x = layers.Add()([x, inputs])

        return x
Example #17
    def _inverted_res_block(self, inputs, expansion, stride, alpha, filters,
                            block_id):
        channel_axis = -1

        in_channels = backend.int_shape(inputs)[channel_axis]
        pointwise_conv_filters = int(filters * alpha)
        pointwise_filters = self._make_divisible(pointwise_conv_filters, 8)
        x = inputs
        prefix = 'block_{}_'.format(block_id)

        if block_id:
            # Expand
            x = layers.Conv2D(expansion * in_channels,
                              kernel_size=1,
                              padding='same',
                              use_bias=False,
                              activation=None,
                              name=prefix + 'expand')(x)
            x = layers.BatchNormalization(axis=channel_axis,
                                          epsilon=1e-3,
                                          momentum=0.999,
                                          name=prefix + 'expand_BN')(x)
            x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
        else:
            prefix = 'expanded_conv_'

        # Depthwise
        if stride == 2:
            x = layers.ZeroPadding2D(padding=correct_pad(backend, x, 3),
                                     name=prefix + 'pad')(x)
        x = layers.DepthwiseConv2D(kernel_size=3,
                                   strides=stride,
                                   activation=None,
                                   use_bias=False,
                                   padding='same' if stride == 1 else 'valid',
                                   name=prefix + 'depthwise')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'depthwise_BN')(x)

        x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

        # Project
        x = layers.Conv2D(pointwise_filters,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          activation=None,
                          name=prefix + 'project')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'project_BN')(x)

        if in_channels == pointwise_filters and stride == 1:
            return layers.Add(name=prefix + 'add')([inputs, x])
        return x
Example #18
def bottleneck(inputs,
               filters,
               kernel,
               t,
               s,
               r=False,
               alpha=1.0,
               block_id=1,
               train_bn=False):
    """Bottleneck
    This function defines a basic bottleneck structure.
    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers,specifying the strides
            of the convolution along the width and height.Can be a single
            integer to specify the same value for all spatial dimensions.
        r: Boolean, whether to use the residual connection.
        alpha: Float, width multiplier applied to the number of filters.
        block_id: Integer, id of the bottleneck.
        train_bn: Boolean, train or freeze Batch Norm layers.
    # Returns
        Output tensor.
    """

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t
    filters = int(alpha * filters)

    x = conv_block(inputs,
                   tchannel,
                   alpha, (1, 1), (1, 1),
                   block_id=block_id,
                   train_bn=train_bn)

    x = KL.DepthwiseConv2D(kernel,
                           strides=(s, s),
                           depth_multiplier=1,
                           padding='same',
                           name='conv_dw_{}'.format(block_id))(x)
    x = BatchNorm(axis=channel_axis,
                  name='conv_dw_{}_bn'.format(block_id))(x, training=train_bn)
    x = KL.Activation(relu6, name='conv_dw_{}_relu'.format(block_id))(x)

    x = KL.Conv2D(filters, (1, 1),
                  strides=(1, 1),
                  padding='same',
                  name='conv_pw_{}'.format(block_id))(x)
    x = BatchNorm(axis=channel_axis,
                  name='conv_pw_{}_bn'.format(block_id))(x, training=train_bn)

    if r:
        x = KL.add([x, inputs], name='res{}'.format(block_id))
    return x
Example #19
    def block(inputs):

        if block_args.expand_ratio != 1:
            x = KL.Conv2D(
                filters,
                kernel_size=[1, 1],
                strides=[1, 1],
                kernel_initializer=conv_kernel_initializer,
                padding="same",
                use_bias=False,
            )(inputs)
            x = KL.BatchNormalization(
                axis=channel_axis,
                momentum=batch_norm_momentum,
                epsilon=batch_norm_epsilon,
            )(x)
            x = Swish()(x)
        else:
            x = inputs

        x = KL.DepthwiseConv2D(
            [kernel_size, kernel_size],
            strides=block_args.strides,
            depthwise_initializer=conv_kernel_initializer,
            padding="same",
            use_bias=False,
        )(x)
        x = KL.BatchNormalization(axis=channel_axis,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)
        x = Swish()(x)

        if has_se:
            x = SEBlock(block_args, global_params)(x)

        # output phase

        x = KL.Conv2D(
            block_args.output_filters,
            kernel_size=[1, 1],
            strides=[1, 1],
            kernel_initializer=conv_kernel_initializer,
            padding="same",
            use_bias=False,
        )(x)
        x = KL.BatchNormalization(axis=channel_axis,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)

        if block_args.id_skip:
            if (all(s == 1 for s in block_args.strides)
                    and block_args.input_filters == block_args.output_filters):
                # only apply drop_connect if skip presents.
                if drop_connect_rate:
                    x = DropConnect(drop_connect_rate)(x)
                x = KL.Add()([x, inputs])
        return x
Example #20
    def build(x):
        with K.name_scope(name):
            if conv_shortcut is True:
                shortcut = layers.Conv2D((64 // groups) * filters,
                                         1,
                                         strides=stride,
                                         use_bias=False,
                                         name=name + '_0_conv')(x)
                shortcut = layers.BatchNormalization(axis=bn_axis,
                                                     epsilon=1.001e-5,
                                                     name=name +
                                                     '_0_bn')(shortcut)
            else:
                shortcut = x

            x = layers.Conv2D(filters,
                              1,
                              use_bias=False,
                              name=name + '_1_conv')(x)
            x = layers.BatchNormalization(axis=bn_axis,
                                          epsilon=1.001e-5,
                                          name=name + '_1_bn')(x)
            x = layers.Activation('relu', name=name + '_1_relu')(x)

            c = filters // groups
            x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)),
                                     name=name + '_2_pad')(x)
            x = layers.DepthwiseConv2D(kernel_size,
                                       strides=stride,
                                       depth_multiplier=c,
                                       use_bias=False,
                                       name=name + '_2_conv')(x)
            x_shape = K.int_shape(x)[1:-1]
            x = layers.Reshape(x_shape + (groups, c, c))(x)
            output_shape = (x_shape + (groups, c)
                            if K.backend() == 'theano' else None)
            x = layers.Lambda(
                lambda x: sum([x[:, :, :, :, i] for i in range(c)]),
                output_shape=output_shape,
                name=name + '_2_reduce')(x)
            x = layers.Reshape(x_shape + (filters, ))(x)
            x = layers.BatchNormalization(axis=bn_axis,
                                          epsilon=1.001e-5,
                                          name=name + '_2_bn')(x)
            x = layers.Activation('relu', name=name + '_2_relu')(x)

            x = layers.Conv2D((64 // groups) * filters,
                              1,
                              use_bias=False,
                              name=name + '_3_conv')(x)
            x = layers.BatchNormalization(axis=bn_axis,
                                          epsilon=1.001e-5,
                                          name=name + '_3_bn')(x)

            x = layers.Add(name=name + '_add')([shortcut, x])
            x = layers.Activation('relu', name=name + '_out')(x)
        return x
Example #21
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id,
                        leaky_relu):
    """Utility function used in MobileNetV2."""
    in_channels = backend.int_shape(inputs)[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)

    if block_id:
        # Expand
        x = layers.Conv2D(expansion * in_channels,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          activation=None,
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'expand_BN')(x)
        x = _mobilenetv2_relu_activate(name=prefix + 'expand_relu',
                                       leaky_relu=leaky_relu)(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    if stride == 2:
        x = layers.ZeroPadding2D(padding=correct_pad(backend, x, 3),
                                 name=prefix + 'pad')(x)
    x = layers.DepthwiseConv2D(kernel_size=3,
                               strides=stride,
                               activation=None,
                               use_bias=False,
                               padding='same' if stride == 1 else 'valid',
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'depthwise_BN')(x)

    x = _mobilenetv2_relu_activate(name=prefix + 'depthwise_relu',
                                   leaky_relu=leaky_relu)(x)

    # Project
    x = layers.Conv2D(pointwise_filters,
                      kernel_size=1,
                      padding='same',
                      use_bias=False,
                      activation=None,
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(epsilon=1e-3,
                                  momentum=0.999,
                                  name=prefix + 'project_BN')(x)

    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=prefix + 'add')([inputs, x])
    return x
Example #22
    def __init__(self,
                 filters,
                 kernels,
                 groups,
                 type='conv',
                 conv_kwargs=None,
                 **kwargs):
        super(GroupConvolution, self).__init__(**kwargs)

        if conv_kwargs is None:
            conv_kwargs = {
                'strides': (1, 1),
                'padding': 'same',
                'dilation_rate': (1, 1),
                'use_bias': False,
            }

        self.filters = filters
        self.kernels = kernels
        self.groups = groups
        self.type = type
        self.strides = conv_kwargs.get('strides', (1, 1))
        self.padding = conv_kwargs.get('padding', 'same')
        self.dilation_rate = conv_kwargs.get('dilation_rate', (1, 1))
        self.use_bias = conv_kwargs.get('use_bias', False)
        self.conv_kwargs = conv_kwargs or {}

        assert type in ['conv', 'depthwise_conv']
        if type == 'conv':
            splits = self._split_channels(filters, self.groups)
            self._layers = [
                layers.Conv2D(splits[i],
                              kernels[i],
                              strides=self.strides,
                              padding=self.padding,
                              dilation_rate=self.dilation_rate,
                              use_bias=self.use_bias,
                              kernel_initializer=MixNetConvInitializer())
                for i in range(groups)
            ]

        else:
            self._layers = [
                layers.DepthwiseConv2D(
                    kernels[i],
                    strides=self.strides,
                    padding=self.padding,
                    dilation_rate=self.dilation_rate,
                    use_bias=self.use_bias,
                    depthwise_initializer=MixNetConvInitializer())
                for i in range(groups)
            ]

        self.data_format = 'channels_last'
        self._channel_axis = -1
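`_split_channels` is defined elsewhere in that project; a plausible stand-in, mirroring the MixNet convention of spreading `filters` as evenly as possible across `groups` (the tie-breaking is an assumption):

    @staticmethod
    def _split_channels(total_filters, num_groups):
        # Assumed helper: even split, remainder assigned to the first group.
        split = [total_filters // num_groups] * num_groups
        split[0] += total_filters - sum(split)
        return split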
Example #23
def xnet(depth=8,
         n_filters=64,
         kernel_size=(3, 3),
         skernel_size=(9, 9),
         n_channels=1,
         channels_first=False):
    """xNet implementation on Keras. Implementation followed the paper [1]_.

    Notes
    -----
    The implementation is based on the Pytorch version, available on `this Github page
    <https://github.com/kligvasser/xUnit>`_.

    Parameters
    ----------
    depth : int
        Number of convolutional blocks in the network. In the DnCNN paper, the
        authors used depth=17 for non-blind denoising and depth=20 for blind
        denoising.
    n_filters : int
        Number of filters on each convolutional layer.
    kernel_size : int tuple
        2D tuple specifying the size of the kernel window used to compute activations.
    skernel_size : int tuple
        2D tuple specifying the size of the depthwise (spatial) kernel window.
    n_channels : int
        Number of image channels that the network processes (1 for grayscale, 3 for RGB).
    channels_first : bool
        Whether channels come first (NCHW, True) or last (NHWC, False).

    Returns
    -------
    :class:`keras.models.Model`
        Keras model object representing the Neural Network.

    References
    ----------
    .. [1] Kligvasser I., Rott Shaham T., Michaeli T. xUnit: Learning a spatial activation function for efficient
           image restoration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018.
    """
    x = layers.Input(shape=[None, None, n_channels])
    y = x
    for _ in range(depth):
        z = layers.Conv2D(filters=n_filters,
                          kernel_size=kernel_size,
                          padding="same",
                          use_bias=False)(y)
        z = layers.BatchNormalization()(z)
        z = layers.Activation('relu')(z)
        z = layers.DepthwiseConv2D(kernel_size=skernel_size,
                                   padding="same",
                                   use_bias=False)(z)
        d = layers.BatchNormalization()(z)
        g = layers.Lambda(lambda x: backend.exp(-backend.square(x)))(d)

        y = layers.Multiply()([y, g])
    y = layers.Conv2D(filters=n_channels, kernel_size=kernel_size, padding="same")(y)  # match input channel count
    return models.Model(x, y)
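A quick smoke test of the model above (assuming `from tensorflow.keras import layers, models, backend` and `import numpy as np`):

model = xnet(depth=4, n_filters=32, n_channels=1)
noisy = np.random.rand(1, 64, 64, 1).astype('float32')
denoised = model.predict(noisy)  # shape (1, 64, 64, 1)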
Example #24
def block3(x, filters, kernel_size=3, stride=1, groups=32,
           conv_shortcut=True, name=None):
    """A residual block.
    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        groups: default 32, group size for grouped convolution.
        conv_shortcut: default True, use convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.
    # Returns
        Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    if conv_shortcut is True:
        shortcut = layers.Conv2D((64 // groups) * filters, 1, strides=stride,
                                 use_bias=False, name=name + '_0_conv')(x)
        shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                             name=name + '_0_bn')(shortcut)
    else:
        shortcut = x

    x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                  name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    c = filters // groups
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = layers.DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,
                               use_bias=False, name=name + '_2_conv')(x)
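    # Fixed (non-trainable) 1x1 kernel: output filter i sums the
    # c = filters // groups depthwise responses of its own group, so the
    # depthwise conv plus this constant pointwise conv together emulate a
    # grouped convolution.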
    kernel = np.zeros((1, 1, filters * c, filters), dtype=np.float32)
    for i in range(filters):
        start = (i // c) * c * c + i % c
        end = start + c * c
        kernel[:, :, start:end:c, i] = 1.
    x = layers.Conv2D(filters, 1, use_bias=False, trainable=False,
                      kernel_initializer={'class_name': 'Constant',
                                          'config': {'value': kernel}},
                      name=name + '_2_gconv')(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                  name=name + '_2_bn')(x)
    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv2D((64 // groups) * filters, 1,
                      use_bias=False, name=name + '_3_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                  name=name + '_3_bn')(x)

    x = layers.Add(name=name + '_add')([shortcut, x])
    x = layers.Activation('relu', name=name + '_out')(x)
    return x
Example #25
    def call(x):
        x = layers.DepthwiseConv2D(kernel_size, strides=strides, dilation_rate=dilation_rate, padding='same',
                                   depthwise_initializer='he_normal', use_bias=False, name=depthwise_name)(x)
        x = layers.BatchNormalization()(x)
        if activation_fn_in_separable_conv:
            x = layers.Activation('relu')(x)
        x = layers.Conv2D(filters, (1, 1), padding='same', kernel_initializer='he_normal', use_bias=False,
                          name=pointwise_name)(x)
        x = layers.BatchNormalization(name=output_bn)(x)

        return x
Example #26
File: BaseNet.py  Project: RuaHU/TLfPS
def ATLnet(inputs, layer=0, SEnet=False):
    reg[0] = l1l2_reg
    reid_map = inputs[layer]
    reid_map = LookWhat(reid_map, name='ALTnet_LookWhat')
    #reid_map = LookWhere(reid_map, name = 'ALTnet_LookWhere')
    dim = 256 if reid_map._keras_shape[-1] <= 256 else reid_map._keras_shape[-1]
    reid_map = DarknetConv2D1(dim, 1, strides=(1, 1), name='yfm_dc3')(reid_map)
    reid_map = KL.DepthwiseConv2D(3,
                                  padding='same',
                                  kernel_regularizer=reg[0],
                                  name='yfm_dwc4')(reid_map)
    return [reid_map]
Example #27
def bn_dwconv_valid(inputs, k_size, st, name):
    '''
    Depthwise convolution block: BatchNorm followed by a depthwise
    convolution with 'valid' padding.
    '''
    br = layers.BatchNormalization(axis=-1, name='{}_bn'.format(name))(inputs)
    br = layers.DepthwiseConv2D(k_size,
                                strides=(st, st),
                                padding='valid',
                                depthwise_regularizer=l2(0.002),
                                use_bias=False,
                                name='{}_dw'.format(name))(br)
    return br
Example #28
def depthwiseConv(kernel_size,
                  strides=1,
                  depth_multiplier=1,
                  dilation_rate=1,
                  use_bias=False):
    return layers.DepthwiseConv2D(kernel_size,
                                  strides=strides,
                                  depth_multiplier=depth_multiplier,
                                  padding='same',
                                  use_bias=use_bias,
                                  kernel_regularizer=regularizers.l2(l=0.0003),
                                  dilation_rate=dilation_rate)
Example #29
def _sep_conv_bn(x,
                 filters,
                 prefix,
                 stride=1,
                 kernel_size=3,
                 rate=1,
                 depth_activation=False,
                 epsilon=1e-3):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
        Implements right "same" padding for even kernel sizes
        Args:
            x: input tensor
            filters: num of filters in pointwise convolution
            prefix: prefix before name
            stride: stride at depthwise conv
            kernel_size: kernel size for depthwise convolution
            rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
            epsilon: epsilon to use in BN layer
    """

    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = layers.ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = layers.Activation('relu')(x)
    x = layers.DepthwiseConv2D((kernel_size, kernel_size),
                               strides=(stride, stride),
                               dilation_rate=(rate, rate),
                               padding=depth_padding,
                               use_bias=False,
                               name=prefix + '_depthwise')(x)
    x = layers.BatchNormalization(name=prefix + '_depthwise_BN',
                                  epsilon=epsilon)(x)
    if depth_activation:
        x = layers.Activation('relu')(x)
    x = layers.Conv2D(filters, (1, 1),
                      padding='same',
                      use_bias=False,
                      name=prefix + '_pointwise')(x)
    x = layers.BatchNormalization(name=prefix + '_pointwise_BN',
                                  epsilon=epsilon)(x)
    if depth_activation:
        x = layers.Activation('relu')(x)

    return x
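For example, with kernel_size=3 and atrous rate=2, the 'valid'-branch padding above works out to a symmetric pad of 2:

kernel_size_effective = 3 + (3 - 1) * (2 - 1)   # 5
pad_total = kernel_size_effective - 1           # 4
pad_beg = pad_total // 2                        # 2
pad_end = pad_total - pad_beg                   # 2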
Example #30
def _depthwise_conv_block(inputs, pointwise_conv_filters, params,
                          strides=(1, 1), dropout=2**-6, block_id=1):
    """Adds a depth-wise convolution block."""

    p = params
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * p.alpha)

    if p.activity_regularizer is not None:
        activity_regularizer = regularizers.l2(p.activity_regularizer)
    else:
        activity_regularizer = None

    if p.max_value is not None:
        max_constraint = MaxConstraint(p.max_value)
    else:
        max_constraint = None

    x = layers.DepthwiseConv2D((3, 3),
                               padding='same',
                               strides=strides,
                               use_bias=p.use_bias,
                               activity_regularizer=activity_regularizer,
                               kernel_regularizer=regularizers.l2(p.weight_regularizer),
                               kernel_constraint=max_constraint,
                               bias_constraint=max_constraint,
                               name='conv_dw_%d' % block_id)(inputs)
    x = layers.BatchNormalization(axis=channel_axis,
                                  beta_constraint=max_constraint,
                                  gamma_constraint=max_constraint,
                                  name='conv_dw_%d_bn' % block_id)(x)
    x = layers.ReLU(max_value=p.max_value, name='conv_dw_%d_relu' % block_id)(x)

    x = layers.Conv2D(pointwise_conv_filters, (1, 1),
                      padding='same',
                      use_bias=p.use_bias,
                      strides=(1, 1),
                      activity_regularizer=activity_regularizer,
                      kernel_regularizer=regularizers.l2(p.weight_regularizer),
                      kernel_constraint=max_constraint,
                      bias_constraint=max_constraint,
                      name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  beta_constraint=max_constraint,
                                  gamma_constraint=max_constraint,
                                  name='conv_pw_%d_bn' % block_id)(x)
    x = layers.ReLU(max_value=p.max_value, name='conv_pw_%d_relu' % block_id)(x)
    if dropout > 0:
        x = layers.Dropout(dropout)(x)

    return x
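The `params` object is defined outside this snippet; a plausible minimal shape for it, inferred from the attributes accessed above (names and defaults are assumptions):

from collections import namedtuple

Params = namedtuple('Params', ['alpha', 'use_bias', 'max_value',
                               'activity_regularizer', 'weight_regularizer'])
params = Params(alpha=1.0, use_bias=False, max_value=6.0,
                activity_regularizer=None, weight_regularizer=1e-4)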