def bottleneck_downsample(x, output_depth, internal_scale=4, Momentum=0.1):
    internal_depth = int(output_depth / internal_scale)

    x_conv = Conv2D(internal_depth, (2, 2), strides=(2, 2))(x)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = Activation("relu")(x_conv)

    x_conv = ZeroPadding2D(padding=((1, 1), (1, 1)))(x_conv)
    x_conv = Conv2D(internal_depth, (3, 3))(x_conv)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = Activation("relu")(x_conv)

    x_conv = Conv2D(output_depth, (1, 1), use_bias=False)(x_conv)
    x_conv = BatchNormalization(momentum=Momentum)(x_conv)
    x_conv = SpatialDropout2D(0.01)(x_conv)

    x_pool = MaxPooling2D(pool_size=(2, 2))(x)

    x = Concatenate(axis=3)([x_conv, x_pool])
    x = Conv2D(output_depth, (1, 1))(x)
    x = BatchNormalization(momentum=Momentum)(x)
    y = Activation("relu")(x)
    return y
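A minimal usage sketch for the block above (assuming the Keras layers it uses are imported from tensorflow.keras.layers; the input shape is illustrative):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(64, 64, 16))            # hypothetical 64x64 feature map with 16 channels
outputs = bottleneck_downsample(inputs, 64)   # halves height/width, expands depth to 64
model = Model(inputs, outputs)
model.summary()                               # output shape should be (None, 32, 32, 64)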
Example 2
def convolutional(input_layer, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky'):
    if downsample:
        input_layer = ZeroPadding2D(((1, 0), (1, 0)))(input_layer)
        padding = 'valid'
        strides = 2
    else:
        strides = 1
        padding = 'same'

    conv = Conv2D(filters=filters_shape[-1], kernel_size = filters_shape[0], strides=strides,
                  padding=padding, use_bias=not bn, kernel_regularizer=l2(0.0005),
                  kernel_initializer=tf.random_normal_initializer(stddev=0.01),
                  bias_initializer=tf.constant_initializer(0.))(input_layer)
    
    if bn:
        conv = BatchNormalization()(conv)
    if activate:
        if activate_type == "leaky":
            conv = LeakyReLU(alpha=0.1)(conv)
        elif activate_type == "mish":
            conv = mish(conv)

    return conv
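The `mish` helper referenced above is not defined in this snippet; a common definition of the Mish activation, x * tanh(softplus(x)), would look like this sketch (not necessarily the author's version):

import tensorflow as tf

def mish(x):
    # Mish activation: smooth, non-monotonic alternative to LeakyReLU
    return x * tf.math.tanh(tf.math.softplus(x))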
def refunit(divider, ch):

    image_input = Input(shape=(int(img_y/divider), int(img_x/divider), ch))
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1')(image_input)
    x = BatchNormalization(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3))(x)

    x = encoding_conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = encoding_conv_block(x, 3, [128, 128, 512], stage=3, block='a')

    x = decoding_conv_block(x, 3, [512, 512, 128], stage=6, block='a')

    x = decoding_conv_block(x, 3, [256, 256, 64], stage=7, block='a')
    x = ZeroPadding2D(padding=(0, 1), data_format=None)(x)

    x = UpSampling2D(size=(3, 3))(x)
    x = Cropping2D(cropping=((2, 2), (1, 1)), data_format=None)(x)
    x = Conv2DTranspose(1, (3, 3), padding='same', name='c8o')(x)
    x = Activation('sigmoid')(x)
    modelo = Model(inputs=image_input, outputs=x)
    modelo.summary()
    return modelo
Example 4
def _conv_block(inp, convs, do_skip=True):
    x = inp
    count = 0

    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1

        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if do_skip else x
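A hypothetical call, assuming the Keras layers used above are in scope; each entry of `convs` mirrors one parsed Darknet layer (the keys match the lookups in the function, the values are illustrative):

from tensorflow.keras.layers import Input

inp = Input(shape=(416, 416, 3))
x = _conv_block(inp, [
    {'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
    {'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
    {'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
    {'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3},
], do_skip=True)   # skip connection taken after layer_idx 1, added back after layer_idx 3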
 def __init__(self, filters, prefix, stride=1, kernel_size=3, rate=1):
     super(conv2d_same, self).__init__()
     self.stride = stride
     if self.stride == 1:
         self.conv2a = Conv2D(filters, (kernel_size, kernel_size),
                              strides=(self.stride, self.stride),
                              padding='same',
                              use_bias=False,
                              dilation_rate=(rate, rate),
                              name=prefix)
     else:
         kernel_size_effective = kernel_size + (kernel_size - 1) * (rate -
                                                                    1)
         pad_total = kernel_size_effective - 1
         pad_beg = pad_total // 2
         pad_end = pad_total - pad_beg
         self.zp = ZeroPadding2D((pad_beg, pad_end))
         self.conv2a = Conv2D(filters, (kernel_size, kernel_size),
                              strides=(stride, stride),
                              padding='valid',
                              use_bias=False,
                              dilation_rate=(rate, rate),
                              name=prefix)
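Only `__init__` is shown above; assuming `conv2d_same` subclasses a Keras Layer/Model, a plausible `call` method (a sketch, not the author's code) would route the input through the optional padding and then the convolution:

 def call(self, inputs):
     # forward pass implied by __init__ above: pad (when strided), then convolve
     if self.stride != 1:
         inputs = self.zp(inputs)
     return self.conv2a(inputs)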
def inception_block_3b(X):
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_5b_3x3',
                               cv1_out=96,
                               cv1_filter=(1, 1),
                               cv2_out=384,
                               cv2_filter=(3, 3),
                               cv2_strides=(1, 1),
                               padding=(1, 1))
    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = fr_utils.conv2d_bn(X_pool,
                                layer='inception_5b_pool',
                                cv1_out=96,
                                cv1_filter=(1, 1))
    X_pool = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_pool)

    X_1x1 = fr_utils.conv2d_bn(X,
                               layer='inception_5b_1x1',
                               cv1_out=256,
                               cv1_filter=(1, 1))
    inception = concatenate([X_3x3, X_pool, X_1x1], axis=1)

    return inception
Example 7
        def __init__(self, imageWidth, imageHeight, isColour=True):

            # supports three channels for rgb else one for greyscale
            colourChannels = 3 if isColour else 1

            # we can infer our input tensor shape from the data.
            self.inputShape = (imageWidth, imageHeight, colourChannels)
            self.Layers = [
                ZeroPadding2D(padding=(0, 0),
                              data_format=None,
                              name="discriminative"),
                Conv2D(10, (3, 3), padding="same"),
                PReLU(alpha_initializer="zeros"),
                Flatten(),
                Dense(20),
                PReLU(alpha_initializer="zeros"),
                Dense(2),
                Activation("softmax"),
            ]

            self.model = Sequential(self.Layers)

            self.compile()
Example 8
def csp_resblock_body(x, num_filters, num_blocks, all_narrow=True):
    """A series of resblocks starting with a downsampling Convolution2D"""
    # Zero-pad the top and left of x, e.g. turning (?, 416, 416, 32) into (?, 417, 417, 32),
    # because the next convolution uses stride 2 and the spatial size needs to be odd.
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    # The first CBM block compresses the height and width.
    x = darknet_CBM(num_filters, (3, 3), strides=(2, 2))(x)  # Darknet only uses conv layers; the stride controls the output feature-map size

    # residual (shortcut) branch
    res_connection = darknet_CBM(num_filters//2 if all_narrow else num_filters, (1, 1))(x)
    # main branch
    x = darknet_CBM(num_filters//2 if all_narrow else num_filters, (1, 1))(x)
    for i in range(num_blocks):
        x_blocks = compose(
            darknet_CBM(num_filters//2, (1,1)),
            darknet_CBM(num_filters//2 if all_narrow else num_filters, (3, 3)))(x)
        x = Add()([x, x_blocks])

    x = darknet_CBM(num_filters//2 if all_narrow else num_filters, (1, 1))(x)
    x = Concatenate()([x, res_connection])  # merge the main branch with the residual branch
    x = darknet_CBM(num_filters, (1, 1))(x)

    return x
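`darknet_CBM` (Conv + BatchNormalization + Mish) is not defined in this snippet; a sketch of what such a helper typically looks like in YOLOv4-style Keras code (the name, padding convention, and regularization strength are assumptions):

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, BatchNormalization
from tensorflow.keras.regularizers import l2

def darknet_CBM(filters, kernel_size, strides=(1, 1)):
    def _block(x):
        # 'valid' padding pairs with the explicit ZeroPadding2D used before strided convolutions
        padding = 'valid' if strides == (2, 2) else 'same'
        x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
                   use_bias=False, kernel_regularizer=l2(5e-4))(x)
        x = BatchNormalization()(x)
        return x * tf.math.tanh(tf.math.softplus(x))  # Mish activation
    return _block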
Example 9
def custom_backbone(input_tensor):

    # input layer
    x = ZeroPadding2D(padding=correct_pad(K, input_tensor, 3))(input_tensor)
    x = Conv2D(32, (3, 3), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # blocks
    x = twoblocks(x, 128, 192, 64)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = twoblocks(x, 192, 256, 96)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = twoblocks(x, 256, 384, 128)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = twoblocks(x, 384, 512, 384)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = twoblocks(x, 512, 768, 256)

    return x
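`correct_pad(K, ...)` here appears to be the keras-applications helper that computes the asymmetric zero-padding needed before a strided convolution; a sketch of that helper for reference (assuming `K` is the Keras backend module):

def correct_pad(backend, inputs, kernel_size):
    # Returns a ((top, bottom), (left, right)) padding tuple for ZeroPadding2D
    img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
    input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]))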
def depthwise_res_block(x, nb_filter, kernel, stride, t, alpha, residual=False, name=None):
    input_tensor = x
    exp_channels = x.shape[-1] * t  # expanded channel count
    alpha_channels = int(nb_filter * alpha)  # output channels after the width multiplier

    x = conv_block(x, exp_channels, (1, 1), (1, 1), name=name)

    if stride[0] == 2:
        x = ZeroPadding2D(padding=pad_size(x, 3), name=name + '_pad')(x)

    x = DepthwiseConv2D(kernel, padding='same' if stride[0] == 1 else 'valid', strides=stride, depth_multiplier=1,
                        use_bias=False, name=name + '_depthwise')(x)

    x = BatchNormalization(axis=3, name=name + '_depthwise_BN')(x)
    x = Activation(relu6, name=name + '_depthwise_relu')(x)

    x = Conv2D(alpha_channels, (1, 1), padding='same', use_bias=False, strides=(1, 1), name=name + '_project')(x)
    x = BatchNormalization(axis=3, name=name + '_project_BN')(x)

    if residual:
        x = layers.add([x, input_tensor], name=name + '_add')

    return x
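`relu6` and `pad_size` are assumed helpers in the MobileNetV2 style: `pad_size` plays the same role as the `correct_pad` sketch above, and `relu6` is a ReLU capped at 6. A sketch of `relu6`:

from tensorflow.keras import backend as K

def relu6(x):
    # ReLU clipped at 6, as used throughout the MobileNet family
    return K.relu(x, max_value=6)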
Example 11
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(
            stage, block)

        x = Conv2D(filters, (1, 1), name=conv_name + '1',
                   **conv_params)(input_tensor)
        x = BatchNormalization(name=bn_name + '1', **bn_params)(x)
        x = Activation('relu', name=relu_name + '1')(x)

        x = ZeroPadding2D(padding=(1, 1))(x)
        x = GroupConv2D(filters, (3, 3), conv_params, conv_name + '2')(x)
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)

        x = Conv2D(filters * 2, (1, 1), name=conv_name + '3', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '3', **bn_params)(x)

        x = Add()([x, input_tensor])

        x = Activation('relu', name=relu_name)(x)
        return x
def resblock_body(x, num_filters, num_blocks, all_narrow=True):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Mish(num_filters, (3, 3), strides=(2, 2))(x)

    res_connection = DarknetConv2D_BN_Mish(
        num_filters // 2 if all_narrow else num_filters, (1, 1))(x)
    x = DarknetConv2D_BN_Mish(num_filters // 2 if all_narrow else num_filters,
                              (1, 1))(x)

    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Mish(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Mish(
                num_filters // 2 if all_narrow else num_filters, (3, 3)))(x)
        x = Add()([x, y])

    x = DarknetConv2D_BN_Mish(num_filters // 2 if all_narrow else num_filters,
                              (1, 1))(x)
    x = Concatenate()([x, res_connection])

    return DarknetConv2D_BN_Mish(num_filters, (1, 1))(x)
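`compose` is the usual keras-yolo3 utility for chaining layers left to right, and `DarknetConv2D_BN_Mish` is analogous to the `darknet_CBM` sketch shown earlier. A sketch of `compose`:

from functools import reduce

def compose(*funcs):
    # Left-to-right function composition: compose(f, g)(x) == g(f(x))
    if not funcs:
        raise ValueError('Composition of an empty sequence is not supported.')
    return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), funcs)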
Example 13
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=EPS):
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'
    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1), padding='same', use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
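A small worked example of the padding arithmetic in the strided branch above (values chosen for illustration: kernel_size=3, rate=2):

kernel_size, rate = 3, 2
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)   # 3 + 2*1 = 5
pad_total = kernel_size_effective - 1                                   # 4
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2           # 2, 2
# so ZeroPadding2D((2, 2)) is applied before the 'valid' depthwise convolution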
Example 14
 def __init__(self):
     #self.model=Model()
     inputs = Input(shape=(224, 224, 3))
     x = ZeroPadding2D((3, 3))(inputs)
     x = Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2))(x)
     x = BatchNormalization(axis=-1)(x)
     x = Activation(activation='relu')(x)
     #x = ZeroPadding2D((1,1))(x)
     x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)
     x = Basic_block(64, [1, 1], 1)(x)
     x = Basic_block(64, [1, 1], 1)(x)
     x = Basic_block(128, [2, 1], 2)(x)
     x = Basic_block(128, [1, 1], 1)(x)
     x = Basic_block(256, [2, 1], 2)(x)
     x = Basic_block(256, [1, 1], 1)(x)
     x = Basic_block(512, [2, 1], 2)(x)
     x = Basic_block(512, [1, 1], 1)(x)
     x = GlobalAveragePooling2D()(x)
     x = Flatten()(x)
     x = Dense(units=10, activation='softmax')(x)
     self.model = tf.keras.Model(inputs=inputs, outputs=x)
     #plot_model(self.model,r'C:/Users/Zhiyan/Desktop/CNN_model/ResNet18.png',show_shapes=True)
     print(self.model.summary())
Example 15
    def build_discriminator(self):
        def conv2d(x, filters, kernel_size, strides, padding):
            x = ZeroPadding2D(padding=padding)(x)
            x = Conv2D(filters,
                       kernel_size,
                       strides,
                       padding='valid',
                       use_bias=False)(x)
            x = LeakyReLU(0.01)(x)
            return x

        input_img = Input(self.input_shape)
        x = input_img
        filters = 64
        for _ in range(6):
            x = conv2d(x, filters, 4, 2, 1)
            filters = filters * 2

        out_cls = Conv2D(self.num_c, 2, 1, padding='valid', use_bias=False)(x)
        out_cls = Reshape((self.num_c, ))(out_cls)
        x = ZeroPadding2D(padding=1)(x)
        out_src = Conv2D(1, 3, 1, padding='valid', use_bias=False)(x)
        return Model(inputs=input_img, outputs=[out_src, out_cls])
Example 16
def convolutional(inputs, blocks, block, i, filters):
    activation = block["activation"]
    filters = int(block["filters"])
    kernel_size = int(block["size"])
    strides = int(block["stride"])

    if strides > 1:
        inputs = ZeroPadding2D(((1, 0), (1, 0)))(inputs)

    inputs = Conv2D(filters,
                    kernel_size,
                    strides=strides,
                    padding='valid' if strides > 1 else 'same',
                    name='conv_' + str(i),
                    use_bias=False if
                    ("batch_normalize" in block) else True)(inputs)

    if "batch_normalize" in block:
        inputs = BatchNormalization(name='bnorm_' + str(i))(inputs)
    if activation == "leaky":
        inputs = LeakyReLU(alpha=0.1, name='leaky_' + str(i))(inputs)

    return inputs, filters, block
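An illustrative call, where `block` mimics one parsed section of a Darknet .cfg file (the keys match the lookups in the function; the values and shapes are hypothetical):

from tensorflow.keras.layers import Input

blocks = []   # the full parsed config would normally go here
block = {"activation": "leaky", "filters": "64", "size": "3",
         "stride": "2", "batch_normalize": "1"}
inputs = Input(shape=(416, 416, 3))
inputs, filters, block = convolutional(inputs, blocks, block, i=0, filters=32)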
def _decodeBlock(x, shortcut, rows_odd, cols_odd, cweights, bns, activation=LeakyReLU(alpha=ALPHA)):
    # Add zero padding on the bottom and right if an odd dimension is required at the output,
    # giving an output one larger than required
    x = ZeroPadding2D(padding=((0,rows_odd),(0,cols_odd)))(x)
    # x = UpSampling2D(size=(2,2), interpolation=UPSAMPLE_INTERP)(x)

    # up_size = np.array(x.shape)
    # up_size[1] *= 2
    # up_size[2] *= 2
    # x = bicubic_interp_2d(x,(up_size[1],up_size[2]))

    x = upsample_helper(x)
    
    #If padding was added, crop the output to match the target shape
    #print(rows_odd)
    #print(cols_odd)
    x = Cropping2D(cropping=((0,rows_odd),(0,cols_odd)))(x)

    x = Concatenate()([shortcut, x])

    x = res_Block(x, cweights, bns, activation=LeakyReLU(alpha=ALPHA))

    return x
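`upsample_helper` is not shown; the commented-out lines suggest it performs a 2x spatial upsample, e.g. this sketch (an assumption, with the interpolation mode hypothetical):

from tensorflow.keras.layers import UpSampling2D

def upsample_helper(x, interpolation='bilinear'):
    # Double the height and width of the feature map
    return UpSampling2D(size=(2, 2), interpolation=interpolation)(x)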
Example 18
    def build_discriminator(self):
        model = Sequential()

        model.add(
            Conv2D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=(self.image_res, self.image_res, 3),
                   padding="same"))
        model.add(Activation("relu"))

        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

        model.add(Dropout(0.25))
        model.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        self.discriminator = model
Example 19
def build_discriminator(image_shape):
    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=3,
               strides=2,
               input_shape=image_shape,
               padding="same"))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Conv2D(512, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))

    return model
Example 20
def tiny_yolo5_predictions(feature_maps, feature_channel_nums, num_anchors,
                           num_classes, use_spp):
    f1, f2 = feature_maps
    f1_channel_num, f2_channel_num = feature_channel_nums

    #feature map 1 head (13 x 13 x f1_channel_num//2 for 416 input)
    x1 = DarknetConv2D_BN_Mish(f1_channel_num // 2, (1, 1))(f1)
    if use_spp:
        x1 = Spp_Conv2D_BN_Mish(x1, f1_channel_num // 2)

    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(DarknetConv2D_BN_Mish(f2_channel_num // 2, (1, 1)),
                          UpSampling2D(2))(x1)
    x2 = compose(
        Concatenate(),
        #Depthwise_Separable_Conv2D_BN_Mish(filters=f2_channel_num, kernel_size=(3, 3), block_id_str='15'),
        DarknetConv2D_BN_Mish(f2_channel_num, (3, 3)))([x1_upsample, f2])

    #feature map 2 output (26 x 26 x f2_channel_num for 416 input)
    y2 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1),
                       name='predict_conv_2')(x2)

    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
        ZeroPadding2D(((1, 0), (1, 0))),
        #Darknet_Depthwise_Separable_Conv2D_BN_Mish(f1_channel_num//2, (3,3), strides=(2,2), block_id_str='16'),
        DarknetConv2D_BN_Mish(f1_channel_num // 2, (3, 3), strides=(2, 2)))(x2)
    x1 = compose(
        Concatenate(),
        #Depthwise_Separable_Conv2D_BN_Mish(filters=f1_channel_num, kernel_size=(3, 3), block_id_str='17'),
        DarknetConv2D_BN_Mish(f1_channel_num, (3, 3)))([x2_downsample, x1])

    #feature map 1 output (13 x 13 x f1_channel_num for 416 input)
    y1 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1),
                       name='predict_conv_1')(x1)

    return y1, y2
Example 21
def ResNet50(inputs):

    img_input = inputs
    x = ZeroPadding2D((3, 3))(img_input)
    # 300,300,64
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)
    x = BatchNormalization(name='bn_conv1')(x)
    x = Activation('relu')(x)
    # 150,150,64
    x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # 150,150,256
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    y0 = x
    # 75,75,512
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    y1 = x
    # 38,38,1024
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    y2 = x
    # 19,19,2048
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    y3 = x
    model = Model(img_input, [y0, y1, y2, y3], name='resnet50')

    return model
Example 22
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, bottleneck 1x1 Conv2D, 3x3 Conv2D, and optional dropout
        # Arguments
            x: input tensor 
            stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4  
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Convolution2D(inter_channel, kernel_size=(1, 1), name=conv_name_base+'_x1', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    # endif

    # 3x3 Convolution
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, kernel_size=(3, 3), name=conv_name_base+'_x2', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    # endif

    return x
Example 23
def build_discriminator(image_shape=(720, 1280, 3)):

    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=image_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0,1),(0,1))))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Conv2D(512, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    input_image = Input(shape=image_shape)

    validity = model(input_image)

    return Model(input_image, validity, name = "Discriminator")
Example 24
def resblock_body(x, num_filters, num_blocks, all_narrow=True):
    #----------------------------------------------------------------#
    #   Use ZeroPadding2D and a stride-2x2 convolution block to compress the height and width
    #----------------------------------------------------------------#
    preconv1 = ZeroPadding2D(((1, 0), (1, 0)))(x)
    preconv1 = DarknetConv2D_BN_Mish(num_filters, (3, 3),
                                     strides=(2, 2))(preconv1)

    #--------------------------------------------------------------------#
    #   Then build a large residual branch, shortconv, which bypasses the stacked residual blocks
    #--------------------------------------------------------------------#
    shortconv = DarknetConv2D_BN_Mish(
        num_filters // 2 if all_narrow else num_filters, (1, 1))(preconv1)

    #----------------------------------------------------------------#
    #   The main branch loops num_blocks times; each iteration is a residual block.
    #----------------------------------------------------------------#
    mainconv = DarknetConv2D_BN_Mish(
        num_filters // 2 if all_narrow else num_filters, (1, 1))(preconv1)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Mish(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Mish(
                num_filters // 2 if all_narrow else num_filters,
                (3, 3)))(mainconv)
        mainconv = Add()([mainconv, y])
    postconv = DarknetConv2D_BN_Mish(
        num_filters // 2 if all_narrow else num_filters, (1, 1))(mainconv)

    #----------------------------------------------------------------#
    #   Stack the large residual branch back in
    #----------------------------------------------------------------#
    route = Concatenate()([postconv, shortconv])

    # finally fuse the channels with a 1x1 convolution
    return DarknetConv2D_BN_Mish(num_filters, (1, 1))(route)
Example 25
def simple_model(input_image):
    X_input = Input(input_image)
    X = ZeroPadding2D((3, 3))(X_input)

    X = Conv2D(filters=10,
               kernel_size=(5, 5),
               strides=(1, 1),
               padding='valid',
               name='conv_1',
               kernel_initializer=glorot_uniform(seed=0),
               kernel_regularizer=regularizers.l2(0.01))(X)
    X = BatchNormalization(axis=3, name='bn_1')(X)
    X = Activation('relu')(X)

    X = MaxPooling2D(pool_size=(2, 2), name='pool_1')(X)

    X = Flatten()(X)
    X = Dense(units=1,
              activation='sigmoid',
              name='fc',
              kernel_regularizer=regularizers.l2(0.01))(X)

    model = Model(inputs=X_input, outputs=X, name='first_model')
    return model
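A hypothetical usage sketch (the 64x64 RGB input shape and training configuration are illustrative):

model = simple_model((64, 64, 3))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()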
Example 26
def csp_resblock_body(x, num_filters, num_blocks, all_narrow=True):
    """CSPNet: A New Backbone that can Enhance Learning Capability of CNN

    Args:
        x: input feature map [batch_size, m, n, c]
        num_filters: a scalar
        num_blocks: a scalar
        all_narrow: boolean

    Returns:
        results: output feature map [batch_size, m//2, n//2, num_filters]
    """
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)

    # pre convolution
    preconv = DarknetConv2D_BN_Mish(num_filters, (3, 3), strides=(2, 2))(x)

    # use 1x1 convolutions to split the feature into main and shortcut paths
    main_conv = DarknetConv2D_BN_Mish(
        num_filters // 2 if all_narrow else num_filters, (1, 1))(preconv)
    short_conv = DarknetConv2D_BN_Mish(
        num_filters // 2 if all_narrow else num_filters, (1, 1))(preconv)
    for i in range(num_blocks):
        y = DarknetConv2D_BN_Mish(num_filters // 2, (1, 1))(main_conv)
        y = DarknetConv2D_BN_Mish(
            num_filters // 2 if all_narrow else num_filters, (3, 3))(y)
        main_conv = Add()([main_conv, y])

    # post convolution
    post_conv = DarknetConv2D_BN_Mish(
        num_filters // 2 if all_narrow else num_filters, (1, 1))(main_conv)

    # cross stage partial concatenation
    csp_concat = Concatenate()([post_conv, short_conv])
    results = DarknetConv2D_BN_Mish(num_filters, (1, 1))(csp_concat)
    return results
Example 27
def ResNet50(inputs):
    # 512x512x3
    x = ZeroPadding2D((3, 3))(inputs)
    # 256,256,64
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)
    x = BatchNormalization(name='bn_conv1')(x)
    x = Activation('relu')(x)

    # 256,256,64 -> 128,128,64
    x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)

    # 128,128,64 -> 128,128,256
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    # 128,128,256 -> 64,64,512
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    # 64,64,512 -> 32,32,1024
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    # 32,32,1024 -> 16,16,2048
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    return x
Example 28
    def layer(input_tensor):
        x = input_tensor
        residual = input_tensor

        # bottleneck: reduce the channels with a 1x1 convolution
        x = Conv2D(filters // 4, kernel_size=(1,1), kernel_initializer='he_uniform', 
                   strides=strides, use_bias=False)(x)
        x = BatchNormalization(**bn_params)(x)
        x = Activation('relu')(x)

        x = ZeroPadding2D(1)(x)
        x = Conv2D(filters // 4, kernel_size=(3,3), kernel_initializer='he_uniform', use_bias=False)(x)
        x = BatchNormalization(**bn_params)(x)
        x = Activation('relu')(x)

        x = Conv2D(filters, kernel_size=(1,1), kernel_initializer='he_uniform', use_bias=False)(x)
        x = BatchNormalization(**bn_params)(x)

        # If the number of filters or the spatial dimensions changed, apply the same transform to the residual connection
        x_channels = get_num_channels(x)
        r_channels = get_num_channels(residual)

        if strides != 1 or x_channels != r_channels:
            residual = Conv2D(x_channels, kernel_size=(1,1), strides=strides, 
                              kernel_initializer='he_uniform', use_bias=False)(residual)
            residual = BatchNormalization(**bn_params)(residual)

        # Apply the attention module
        x = ChannelSE(reduction=reduction, **kwargs)(x)

        # Add the residual connection
        x = Add()([x, residual])

        x = Activation('relu')(x)

        return x
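`ChannelSE` is not defined in this snippet; a typical channel squeeze-and-excitation block, sketched under that assumption:

from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply

def ChannelSE(reduction=16, **kwargs):
    def _block(x):
        channels = int(x.shape[-1])
        se = GlobalAveragePooling2D()(x)                          # squeeze: global spatial average
        se = Dense(channels // reduction, activation='relu')(se)
        se = Dense(channels, activation='sigmoid')(se)            # excite: per-channel gates in [0, 1]
        se = Reshape((1, 1, channels))(se)
        return Multiply()([x, se])                                # reweight the input channels
    return _block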
    def __init__(self, metalayer, metanet):
        super().__init__(name=metalayer.name)
        self.metalayer = metalayer
        self.metanet = metanet

        if metalayer.stride == 2:
            self.add(ZeroPadding2D(((1, 0), (1, 0))))

        self.add(
            Conv2D(
                filters=metalayer.filters,
                kernel_size=metalayer.size,
                padding="same" if metalayer.stride == 1 else "valid",
                strides=metalayer.stride,
                use_bias=not metalayer.batch_normalize,
                kernel_regularizer=L2(l2=0.005),
                kernel_initializer=tf.random_normal_initializer(stddev=0.01),
                bias_initializer=tf.constant_initializer(0.0),
            ))

        if metalayer.batch_normalize:
            self.add(
                BatchNormalization(epsilon=1e-5,
                                   momentum=self.metanet.momentum))

        if metalayer.activation == "mish":
            self.add(Activation("mish"))
        elif metalayer.activation == "leaky":
            self.add(LeakyReLU(alpha=0.1))
        elif metalayer.activation == "relu":
            self.add(ReLU())
        elif metalayer.activation == "linear":
            pass
        else:
            raise ValueError(
                f"YOLOConv2D: '{metalayer.activation}' is not supported.")
Example 30
def ResNet(backbone, input_shape, classes, stack_fn, preact, **kwargs):

    img_input = x = Input(shape=input_shape, name='main_input')

    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv1_pad')(x)
    x = Conv2D(64,
               3,
               strides=1,
               kernel_regularizer=l2(1e-4),
               use_bias=False,
               name='conv1_conv')(x)
    if not preact:
        x = BatchNormalization(epsilon=1.001e-5, name='conv1_norm')(x)
        x = Activation('relu', name='conv1_acti')(x)

    x = stack_fn(x)
    if preact:
        x = BatchNormalization(epsilon=1.001e-5, name='post_norm')(x)
        x = Activation('relu', name='post_acti')(x)

    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, kernel_regularizer=l2(1e-4), name='main_output')(x)
    model = CSKD(img_input, x, name=backbone)
    return model