Example #1
def ResNet18(input_shape, classes):

    inputs = Input(shape=input_shape)

    x = ZeroPadding2D(padding=(3, 3), name='conv1_pad')(inputs)
    x = Conv2D(64, (7, 7),
               strides=(2, 2),
               padding='valid',
               kernel_initializer='he_normal',
               name='conv1')(x)
    x = BatchNormalization(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, 64, stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, 64, stage=2, block='b')

    x = conv_block(x, 3, 128, stage=3, block='a')
    x = identity_block(x, 3, 128, stage=3, block='b')

    x = conv_block(x, 3, 256, stage=4, block='a')
    x = identity_block(x, 3, 256, stage=4, block='b')

    x = conv_block(x, 3, 512, stage=5, block='a')
    x = identity_block(x, 3, 512, stage=5, block='b')

    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, activation='softmax', name='fc')(x)

    model = Model(inputs, x, name='resnet18')

    return model
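A minimal usage sketch (assuming the Keras layer imports and the `conv_block`/`identity_block` helpers called above are in scope; the input shape, class count and optimizer are illustrative):

# Hypothetical example: build and compile the 18-layer model for 10 classes.
model = ResNet18(input_shape=(224, 224, 3), classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()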
def ranknet():
    vgg_model = VGG19(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(convnet_output)

    s1 = MaxPool2D(pool_size=(4, 4), strides=(4, 4), padding='valid')(vgg_model.input)
    s1 = ZeroPadding2D(padding=(4, 4), data_format=None)(s1)
    s1 = Conv2D(96, kernel_size=(8, 8), strides=(4, 4), padding='valid')(s1)
    s1 = ZeroPadding2D(padding=(2, 2), data_format=None)(s1)
    s1 = MaxPool2D(pool_size=(7, 7), strides=(4, 4), padding='valid')(s1)
    s1 = Flatten()(s1)

    s2 = MaxPool2D(pool_size=(8, 8), strides=(8, 8), padding='valid')(vgg_model.input)
    s2 = ZeroPadding2D(padding=(4, 4), data_format=None)(s2)
    s2 = Conv2D(96, kernel_size=(8, 8), strides=(4, 4), padding='valid')(s2)
    s2 = ZeroPadding2D(padding=(1, 1), data_format=None)(s2)
    s2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(s2)
    s2 = Flatten()(s2)

    merge_one = concatenate([s1, s2])
    merge_one_norm = Lambda(lambda x: K.l2_normalize(x, axis=1))(merge_one)
    merge_two = concatenate([merge_one_norm, convnet_output], axis=1)
    emb = Dense(4096)(merge_two)
    l2_norm_final = Lambda(lambda x: K.l2_normalize(x, axis=1))(emb)

    final_model = tf.keras.models.Model(inputs=vgg_model.input, outputs=l2_norm_final)

    return final_model
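A quick sanity check of the embedding size (a sketch; instantiating `VGG19` with ImageNet weights downloads them on first use):

# The model maps a 224x224x3 image to an L2-normalized 4096-d embedding,
# the form typically fed to a triplet/ranking loss.
model = ranknet()
print(model.output_shape)   # expected: (None, 4096)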
def visnet_lrn2d_model():
    vgg_model = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.6)(convnet_output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.6)(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(convnet_output)

    first_maxpool = MaxPooling2D(pool_size=4, strides=4)(vgg_model.input)
    first_conv = Conv2D(96, kernel_size=8, strides=4, activation='relu')(first_maxpool)
    first_lrn2d = LRN2D(n=5)(first_conv)
    first_zero_padding = ZeroPadding2D(padding=(3, 3))(first_lrn2d)
    first_maxpool2 = MaxPooling2D(pool_size=7, strides=4, padding='same')(first_zero_padding)
    first_maxpool2 = Flatten()(first_maxpool2)
    first_maxpool2 = Lambda(lambda x: K.l2_normalize(x, axis=1))(first_maxpool2)

    second_maxpool = MaxPooling2D(pool_size=8, strides=8)(vgg_model.input)
    second_conv = Conv2D(96, kernel_size=8, strides=4, activation='relu')(second_maxpool)
    second_lrn2d = LRN2D(n=5)(second_conv)
    second_zero_padding = ZeroPadding2D(padding=(1, 1))(second_lrn2d)
    second_maxpool2 = MaxPooling2D(pool_size=3, strides=2, padding='same')(second_zero_padding)
    second_maxpool2 = Flatten()(second_maxpool2)
    second_maxpool2 = Lambda(lambda x: K.l2_normalize(x, axis=1))(second_maxpool2)

    merge_one = concatenate([first_maxpool2, second_maxpool2])
    merge_two = concatenate([merge_one, convnet_output])
    emb = Dense(4096)(merge_two)
    l2_norm_final = Lambda(lambda x: K.l2_normalize(x, axis=1))(emb)

    final_model = Model(inputs=vgg_model.input, outputs=l2_norm_final)

    return final_model
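`LRN2D` is not a built-in Keras layer. A minimal sketch of a compatible local response normalization layer, wrapping `tf.nn.local_response_normalization`, could look like the following (the parameter defaults are assumptions, not the project's original implementation):

import tensorflow as tf
from tensorflow.keras.layers import Layer

class LRN2D(Layer):
    # Cross-channel local response normalization (sketch only).
    def __init__(self, n=5, alpha=1e-4, beta=0.75, k=2.0, **kwargs):
        super().__init__(**kwargs)
        self.n, self.alpha, self.beta, self.k = n, alpha, beta, k

    def call(self, inputs):
        # tf.nn.local_response_normalization takes a depth radius, i.e. n // 2.
        return tf.nn.local_response_normalization(
            inputs, depth_radius=self.n // 2,
            bias=self.k, alpha=self.alpha, beta=self.beta)

    def get_config(self):
        config = super().get_config()
        config.update({'n': self.n, 'alpha': self.alpha,
                       'beta': self.beta, 'k': self.k})
        return config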
Example #4
def up_projection(lt_, nf, s, block):
    with tf.name_scope('up_' + str(block)):
        if s == 2:
            ht = Conv2DTranspose(nf, 2, strides=2)(lt_)
            ht = PReLU()(ht)
            lt = ZeroPadding2D(2)(ht)
            lt = Conv2D(nf, 6, 2)(lt)
            lt = PReLU()(lt)
            et = Subtract()([lt, lt_])
            ht1 = Conv2DTranspose(nf, 2, strides=2)(et)
            ht1 = PReLU()(ht1)
            ht1 = Add()([ht, ht1])
            return (ht1)
        if s == 4:
            ht = Conv2DTranspose(nf, 4, strides=4)(lt_)
            ht = PReLU()(ht)
            lt = ZeroPadding2D(2)(ht)
            lt = Conv2D(nf, 8, strides=4)(lt)
            lt = PReLU()(lt)
            et = Subtract()([lt, lt_])
            ht1 = Conv2DTranspose(nf, 4, strides=4)(et)
            ht1 = PReLU()(ht1)
            ht1 = Add()([ht, ht1])
            return (ht1)
        if s == 8:
            ht = Conv2DTranspose(nf, 8, strides=8)(lt_)
            ht = PReLU()(ht)
            lt = ZeroPadding2D(2)(ht)
            lt = Conv2D(nf, 12, strides=8)(lt)
            lt = PReLU()(lt)
            et = Subtract()([lt, lt_])
            ht1 = Conv2DTranspose(nf, 8, strides=8)(et)
            ht1 = PReLU()(ht1)
            ht1 = Add()([ht, ht1])
        return (ht1)
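The kernel sizes, strides and padding in each branch are chosen so the Conv2D brings the upsampled map back to the input resolution before the residual is formed (the DBPN-style up-projection pattern). A shape check for the s == 2 branch, with illustrative sizes and assuming the same Keras imports as the function itself:

# Conv2DTranspose(k=2, s=2, 'valid'):  H        -> 2H
# ZeroPadding2D(2):                    2H       -> 2H + 4
# Conv2D(k=6, s=2, 'valid'):           2H + 4   -> (2H + 4 - 6) // 2 + 1 = H
# so `et = lt - lt_` is computed at the original resolution, and the second
# Conv2DTranspose maps it back to 2H for the final Add with `ht`.
x = Input(shape=(16, 16, 64))             # H = 16, nf = 64 chosen for illustration
y = up_projection(x, nf=64, s=2, block=1)
print(y.shape)                            # expected: (None, 32, 32, 64)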
Example #5
def inception_block_1b(X):
    X_3x3 = Conv2D(96, (1, 1),
                   data_format='channels_first',
                   name='inception_3b_3x3_conv1')(X)
    X_3x3 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3b_3x3_bn1')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)
    X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
    X_3x3 = Conv2D(128, (3, 3),
                   data_format='channels_first',
                   name='inception_3b_3x3_conv2')(X_3x3)
    X_3x3 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3b_3x3_bn2')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)

    X_5x5 = Conv2D(32, (1, 1),
                   data_format='channels_first',
                   name='inception_3b_5x5_conv1')(X)
    X_5x5 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3b_5x5_bn1')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)
    X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
    X_5x5 = Conv2D(64, (5, 5),
                   data_format='channels_first',
                   name='inception_3b_5x5_conv2')(X_5x5)
    X_5x5 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3b_5x5_bn2')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)

    X_pool = AveragePooling2D(pool_size=(3, 3),
                              strides=(3, 3),
                              data_format='channels_first')(X)
    X_pool = Conv2D(64, (1, 1),
                    data_format='channels_first',
                    name='inception_3b_pool_conv')(X_pool)
    X_pool = BatchNormalization(axis=1,
                                epsilon=0.00001,
                                name='inception_3b_pool_bn')(X_pool)
    X_pool = Activation('relu')(X_pool)
    X_pool = ZeroPadding2D(padding=(4, 4),
                           data_format='channels_first')(X_pool)

    X_1x1 = Conv2D(64, (1, 1),
                   data_format='channels_first',
                   name='inception_3b_1x1_conv')(X)
    X_1x1 = BatchNormalization(axis=1,
                               epsilon=0.00001,
                               name='inception_3b_1x1_bn')(X_1x1)
    X_1x1 = Activation('relu')(X_1x1)

    inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)

    return inception
Example #6
def create_model():
    model = Sequential([
        ZeroPadding2D((1, 1), input_shape=(50, 200, 3)),
        Conv2D(filters=64,
               kernel_size=(3, 3),
               padding='same',
               activation='relu'),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
        BatchNormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=128,
               kernel_size=(3, 3),
               padding='same',
               activation='relu'),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),
        BatchNormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=256,
               kernel_size=(3, 3),
               padding='same',
               activation='relu'),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=256, kernel_size=(3, 3), activation='relu'),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=256, kernel_size=(3, 3), activation='relu'),
        BatchNormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=516, kernel_size=(3, 3), activation='relu'),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=516, kernel_size=(3, 3), activation='relu'),
        ZeroPadding2D((1, 1)),
        Conv2D(filters=516, kernel_size=(3, 3), activation='relu'),
        BatchNormalization(),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dropout(0.5)
    ])

    out = [
        Dense(len(LETTERS), name='digit1', activation='softmax')(model.output),
        Dense(len(LETTERS), name='digit2', activation='softmax')(model.output),
        Dense(len(LETTERS), name='digit3', activation='softmax')(model.output),
        Dense(len(LETTERS), name='digit4', activation='softmax')(model.output),
        Dense(len(LETTERS), name='digit5', activation='softmax')(model.output)
    ]

    return Model(inputs=model.inputs, outputs=out)
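A hypothetical training setup for the five per-character heads (it assumes `LETTERS`, the character vocabulary, is defined as in the original project):

# One categorical loss per captcha character output (digit1 .. digit5).
model = create_model()
model.compile(optimizer='adam',
              loss={'digit{}'.format(i): 'categorical_crossentropy'
                    for i in range(1, 6)},
              metrics=['accuracy'])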
    def f(x):
        y = ZeroPadding2D(padding=1,
                          name="padding{}{}_branch2a".format(
                              stage_char, block_char))(x)
        y = Conv2D(filters,
                   kernel_size,
                   strides=stride,
                   use_bias=False,
                   name="res{}{}_branch2a".format(stage_char, block_char),
                   **parameters)(y)
        y = BatchNormalization_Freeze(axis=axis,
                                       epsilon=1e-5,
                                       freeze=freeze_bn,
                                       name="bn{}{}_branch2a".format(
                                           stage_char, block_char))(y)
        y = Activation("relu",
                       name="res{}{}_branch2a_relu".format(
                           stage_char, block_char))(y)

        y = ZeroPadding2D(padding=1,
                          name="padding{}{}_branch2b".format(
                              stage_char, block_char))(y)
        y = Conv2D(filters,
                   kernel_size,
                   use_bias=False,
                   name="res{}{}_branch2b".format(stage_char, block_char),
                   **parameters)(y)
        y = BatchNormalization_Freeze(axis=axis,
                                      epsilon=1e-5,
                                      freeze=freeze_bn,
                                      name="bn{}{}_branch2b".format(
                                          stage_char, block_char))(y)

        if block == 0:
            shortcut = Conv2D(filters, (1, 1),
                              strides=stride,
                              use_bias=False,
                              name="res{}{}_branch1".format(
                                  stage_char, block_char),
                              **parameters)(x)
            shortcut = BatchNormalization_Freeze(axis=axis,
                                                 epsilon=1e-5,
                                                 freeze=freeze_bn,
                                                 name="bn{}{}_branch1".format(
                                                     stage_char,
                                                     block_char))(shortcut)
        else:
            shortcut = x

        y = Add(name="res{}{}".format(stage_char, block_char))([y, shortcut])
        y = Activation("relu",
                       name="res{}{}_relu".format(stage_char, block_char))(y)

        return y
Example #8
    def resnet_block(self, input, dim, ks=(3, 3), strides=(1, 1)):
        x = ZeroPadding2D((1, 1))(input)
        x = Conv2D(dim, ks, strides=strides,
                   kernel_initializer=self.conv_init)(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)

        x = ZeroPadding2D((1, 1))(x)
        x = Conv2D(dim, ks, strides=strides,
                   kernel_initializer=self.conv_init)(x)
        x = InstanceNormalization()(x)
        res = Add()([input, x])
        return res
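`InstanceNormalization` is not part of core Keras; the import is not shown above, but it typically comes from one of the following packages (an assumption about this project):

# Plausible imports for the layer used above:
from tensorflow_addons.layers import InstanceNormalization
# or, in older Keras 2.x code bases:
# from keras_contrib.layers import InstanceNormalization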
def create_mobilenetv1_ssd(num_classes, is_test=False, is_train=False):
    base_net = MobileNet(input_shape=(config.image_size, config.image_size, 3),
                         include_top=False, weights=None)  # disable dropout layer

    source_layer_indexes = [
        73,
        85,
    ]

    extras = [
        [
            Conv2D(filters=256, kernel_size=1, activation='relu'),
            ZeroPadding2D(padding=1),
            Conv2D(filters=512, kernel_size=3, strides=2, padding="valid", activation='relu'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, activation='relu'),
            Conv2D(filters=256, kernel_size=3, strides=2, padding="same", activation='relu'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, activation='relu'),
            Conv2D(filters=256, kernel_size=3, strides=2, padding="same", activation='relu'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, activation='relu'),
            ZeroPadding2D(padding=1),
            Conv2D(filters=256, kernel_size=3, strides=2, padding="valid", activation='relu'),
        ]
    ]

    regression_headers = [
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),  # TODO: change to kernel_size=1, padding=0?
    ]

    classification_headers = [
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same", name='conv_extra_1_' + str(6 * num_classes)),
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same", name='conv_extra_2_' + str(6 * num_classes)),
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same", name='conv_extra_3_' + str(6 * num_classes)),
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same", name='conv_extra_4_' + str(6 * num_classes)),
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same", name='conv_extra_5_' + str(6 * num_classes)),
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same", name='conv_extra_6_' + str(6 * num_classes)),  # TODO: change to kernel_size=1, padding=0?
    ]

    return SSD(num_classes, base_net, source_layer_indexes,
               extras, classification_headers, regression_headers, is_test=is_test, config=config, is_train=is_train)
Example #10
def inverted_res_block(input_tensor, expansion, stride, alpha, filters):
    in_channels = input_tensor.shape.as_list()[-1]
    filters = r(filters * alpha)
    output_tensor = input_tensor

    output_tensor = Conv2D(expansion * in_channels,
                           kernel_size=(1, 1),
                           use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(
        epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)

    output_tensor = ZeroPadding2D()(output_tensor)
    output_tensor = DepthwiseConv2D(kernel_size=(3, 3),
                                    strides=stride,
                                    use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(
        epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)
    output_tensor = ReLU(relu_threshold)(output_tensor)

    output_tensor = Conv2D(filters, kernel_size=(1, 1),
                           use_bias=False)(output_tensor)
    output_tensor = BatchNormalization(
        epsilon=batch_norm_eps, momentum=batch_norm_momentum)(output_tensor)

    if in_channels == filters and stride == 1:
        output_tensor = Add()([input_tensor, output_tensor])
    return output_tensor
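The block relies on a few module-level names that are not shown (`r`, `batch_norm_eps`, `batch_norm_momentum`, `relu_threshold`). A sketch of plausible definitions, following the reference MobileNetV2 defaults (the original project may use different values):

batch_norm_eps = 1e-3          # BatchNormalization epsilon
batch_norm_momentum = 0.999    # BatchNormalization momentum
relu_threshold = 6.0           # first positional arg of ReLU is max_value, i.e. ReLU6

def r(value, divisor=8):
    # Round a channel count to the nearest multiple of `divisor`, never
    # dropping more than 10% (the usual _make_divisible helper).
    new_value = max(divisor, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value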
Example #11
def inception_block_3b(X):
    X_3x3 = models.fr_utils.conv2d_bn(X,
                                      layer='inception_5b_3x3',
                                      cv1_out=96,
                                      cv1_filter=(1, 1),
                                      cv2_out=384,
                                      cv2_filter=(3, 3),
                                      cv2_strides=(1, 1),
                                      padding=(1, 1))
    X_pool = MaxPooling2D(pool_size=3, strides=2,
                          data_format='channels_first')(X)
    X_pool = models.fr_utils.conv2d_bn(X_pool,
                                       layer='inception_5b_pool',
                                       cv1_out=96,
                                       cv1_filter=(1, 1))
    X_pool = ZeroPadding2D(padding=(1, 1),
                           data_format='channels_first')(X_pool)

    X_1x1 = models.fr_utils.conv2d_bn(X,
                                      layer='inception_5b_1x1',
                                      cv1_out=256,
                                      cv1_filter=(1, 1))
    inception = concatenate([X_3x3, X_pool, X_1x1], axis=1)

    return inception
Example #12
def inception_block_2b(X):
    #inception4e
    X_3x3 = models.fr_utils.conv2d_bn(X,
                                      layer='inception_4e_3x3',
                                      cv1_out=160,
                                      cv1_filter=(1, 1),
                                      cv2_out=256,
                                      cv2_filter=(3, 3),
                                      cv2_strides=(2, 2),
                                      padding=(1, 1))
    X_5x5 = models.fr_utils.conv2d_bn(X,
                                      layer='inception_4e_5x5',
                                      cv1_out=64,
                                      cv1_filter=(1, 1),
                                      cv2_out=128,
                                      cv2_filter=(5, 5),
                                      cv2_strides=(2, 2),
                                      padding=(2, 2))

    X_pool = MaxPooling2D(pool_size=3, strides=2,
                          data_format='channels_first')(X)
    X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)),
                           data_format='channels_first')(X_pool)

    inception = concatenate([X_3x3, X_5x5, X_pool], axis=1)

    return inception
Example #13
def conv2d_bn(
        x,
        layer=None,
        cv1_out=None,
        cv1_filter=(1, 1),
        cv1_strides=(1, 1),
        cv2_out=None,
        cv2_filter=(3, 3),
        cv2_strides=(1, 1),
        padding=None,
):
    num = '' if cv2_out is None else '1'
    tensor = Conv2D(cv1_out,
                    cv1_filter,
                    strides=cv1_strides,
                    name=layer + '_conv' + num)(x)
    tensor = BatchNormalization(axis=3,
                                epsilon=0.00001,
                                name=layer + '_bn' + num)(tensor)
    tensor = Activation('relu')(tensor)
    if padding is None:
        return tensor
    tensor = ZeroPadding2D(padding=padding)(tensor)
    if cv2_out is None:
        return tensor
    tensor = Conv2D(cv2_out,
                    cv2_filter,
                    strides=cv2_strides,
                    name=layer + '_conv' + '2')(tensor)
    tensor = BatchNormalization(axis=3,
                                epsilon=0.00001,
                                name=layer + '_bn' + '2')(tensor)
    tensor = Activation('relu')(tensor)
    return tensor
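An illustrative call (layer names and channel counts are hypothetical): a 1x1 bottleneck followed by a padded 3x3 convolution, each with batch normalization and ReLU. Passing `padding=None` stops after the first conv, and `cv2_out=None` stops after the padding.

y = conv2d_bn(x,
              layer='inception_3a_3x3',
              cv1_out=96, cv1_filter=(1, 1),
              cv2_out=128, cv2_filter=(3, 3), cv2_strides=(1, 1),
              padding=(1, 1))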
Example #14
def create_tf_facedetection_fdmobilenet(size, alpha):
    input_tensor = Input(shape=(size, size, 3))
    output_tensor = FDMobileNet(input_tensor, alpha)
    output_tensor = ZeroPadding2D()(output_tensor)
    output_tensor = Conv2D(kernel_size=(3, 3), filters=5)(output_tensor)

    return Model(inputs=input_tensor, outputs=output_tensor)
Example #15
    def build_critic(self):

        model = Sequential()

        model.add(
            Conv2D(16,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example #16
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1, trainable=True):
    """Implements right 'same' padding for even kernel sizes
        Without this there is a 1 pixel drift when stride = 2
        Args:
            x: input tensor
            filters: num of filters in pointwise convolution
            prefix: prefix before name
            stride: stride at depthwise conv
            kernel_size: kernel size for depthwise convolution
            rate: atrous rate for depthwise convolution
    """
    if stride == 1:
        return Conv2D(filters,
                      (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='same', use_bias=False,
                      dilation_rate=(rate, rate),
                      trainable=trainable,
                      name=prefix)(x)
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        return Conv2D(filters,
                      (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='valid', use_bias=False,
                      dilation_rate=(rate, rate),
                      trainable=trainable,
                      name=prefix)(x)
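A worked example of the padding arithmetic (numbers are illustrative):

kernel_size, rate, stride = 3, 2, 2
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)   # 5
pad_total = kernel_size_effective - 1                                  # 4
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2          # 2, 2
# A 65x65 map is padded to 69x69; the 'valid' convolution then produces
# (69 - 5) // 2 + 1 = 33 pixels per side -- the same size 'same' padding
# would give, but with explicit symmetric padding, so the stride-2 branch
# stays aligned with the stride-1 branch (no one-pixel drift).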
Example #17
File: DarkNet53.py  Project: muye5/YOLOv3
def conv_block(input, phase, convs, do_skip=True):
    x = input
    count = 0

    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1

        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['norm'] else True)(x)
        if conv['norm']:
            x = batch_normalization(inputs=x,
                                    training=phase,
                                    name='norm_' + str(conv['layer_idx']))
        if conv['leaky']:
            x = leaky_relu(x, alpha=0.1)

    return add([skip_connection, x]) if do_skip else x
Example #18
def mobile_net_v2(input_tensor, n_classes):
    x = ZeroPadding2D(((0, 1), (0, 1)))(input_tensor)
    x = convolution_block(x, 32, (3, 3), (2, 2), 'valid')
    x = bottleneck(x, 16, (1, 1), 1)
    x = bottleneck(x, 24, (2, 2))
    x = bottleneck(x, 24, (1, 1))
    x = bottleneck(x, 32, (2, 2))
    x = bottleneck(x, 32, (1, 1))
    x = bottleneck(x, 32, (1, 1))
    x = bottleneck(x, 64, (2, 2))
    for i in range(3):
        x = bottleneck(x, 64, (1, 1))
    for i in range(3):
        x = bottleneck(x, 96, (1, 1))
    x = bottleneck(x, 160, (2, 2))
    x = bottleneck(x, 160, (1, 1))
    x = bottleneck(x, 160, (1, 1))
    x = bottleneck(x, 320, (1, 1))
    x = convolution_block(x, 1280, (1, 1), (1, 1), 'same')
    x = AveragePooling2D((7, 7))(x)
    x = Flatten()(x)
    x = Dense(1000)(x)
    x = Activation('relu')(x)
    x = Dense(n_classes, activation='softmax')(x)
    return x
def discriminator_model(img_shape):
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=3,
               strides=2,
               input_shape=img_shape,
               padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))

    model.summary()

    img = Input(shape=img_shape)
    validity = model(img)
    return Model(img, validity)
    def __generate_encoder(self, input_layer):
        # Block 1: (512, 512, 3) -> (128, 128, 64)
        x = ZeroPadding2D(3)(input_layer)
        x = Conv2D(64, kernel_size=7, strides=2, padding='valid')(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)

        # Block 2: (128, 128, 64) -> (128, 128, 64)
        x = self.__res_block(x, filter_n=64, strides=1)
        x = self.__res_block(x, filter_n=64, strides=1)
        x_1 = self.__res_block(x, filter_n=64, strides=1)

        # Block 3: (128, 128, 64) -> (64, 64, 128)
        x = self.__res_block(x_1, filter_n=128, strides=2)  # projection shortcut
        x = self.__res_block(x, filter_n=128, strides=1)
        x = self.__res_block(x, filter_n=128, strides=1)
        x_2 = self.__res_block(x, filter_n=128, strides=1)

        # Block 4: (64, 64, 128) -> (32, 32, 256)
        x = self.__res_block(x_2, filter_n=256, strides=2)  # projection shortcut
        x = self.__res_block(x, filter_n=256, strides=1)
        x = self.__res_block(x, filter_n=256, strides=1)
        x = self.__res_block(x, filter_n=256, strides=1)
        x = self.__res_block(x, filter_n=256, strides=1)
        x_3 = self.__res_block(x, filter_n=256, strides=1)

        # Block 5: (32, 32, 256) -> (16, 16, 512)
        x = self.__res_block(x_3, filter_n=512, strides=2)  # projection shortcut
        x = self.__res_block(x, filter_n=512, strides=1)
        x = self.__res_block(x, filter_n=512, strides=1)

        return x_1, x_2, x_3, x
Example #21
def _conv_block(inp, convs, skip=True):
    x = inp
    count = 0

    for conv in convs:
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1

        if conv['stride'] > 1:
            x = ZeroPadding2D(
                ((1, 0),
                 (1, 0)))(x)  # peculiar padding, as darknet prefers left and top
        x = Conv2D(
            conv['filter'],
            conv['kernel'],
            strides=conv['stride'],
            padding='valid' if conv['stride'] > 1 else 'same',  # explicit left/top padding was applied above when stride > 1
            name='conv_' + str(conv['layer_idx']),
            use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001,
                                   name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if skip else x
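A shape check for one stride-2 layer (the input size and filter count are illustrative; `Input`, `ZeroPadding2D` and `Conv2D` imports are assumed as in the other examples), showing why the top/left-only padding halves the resolution exactly:

x = Input(shape=(416, 416, 3))
x = ZeroPadding2D(((1, 0), (1, 0)))(x)              # 416 -> 417 (top and left only)
x = Conv2D(32, 3, strides=2, padding='valid')(x)    # (417 - 3) // 2 + 1 = 208
print(x.shape)                                      # expected: (None, 208, 208, 32)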
    def conv_bottleneck_ds(x,
                           kernel,
                           filters,
                           downsample,
                           name,
                           padding='same',
                           bottleneck=0.5):
        """
            Bottleneck -> Depthwise Separable
            (Pointwise->Depthwise->Pointswise)
            MobileNetV2 style
            """
        if padding == 'valid':
            pad = ((0, kernel[0] // 2), (0, kernel[0] // 2))
            x = ZeroPadding2D(padding=pad, name=name + 'pad')(x)

        x = Conv2D(int(filters * bottleneck), (1, 1),
                   padding='same',
                   strides=downsample,
                   name=name + '_pw')(x)
        x = add_common(x, name + '_pw')  # assign the result returned by add_common (presumably BN + activation)

        x = SeparableConv2D(filters,
                            kernel,
                            padding=padding,
                            strides=(1, 1),
                            name=name + '_ds')(x)
        return add_common(x, name + '_ds')
Example #23
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Adds an initial convolution layer (with batch normalization and relu6).

  Arguments:
      inputs: Input tensor of shape `(rows, cols, 3)`
          (with `channels_last` data format) or
          (3, rows, cols) (with `channels_first` data format).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 32.
          E.g. `(224, 224, 3)` would be one valid value.
      filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
      alpha: controls the width of the network.
          - If `alpha` < 1.0, proportionally decreases the number
              of filters in each layer.
          - If `alpha` > 1.0, proportionally increases the number
              of filters in each layer.
          - If `alpha` = 1, the default number of filters from the paper
               is used at each layer.
      kernel: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
          specifying the strides of the convolution along the width and height.
          Can be a single integer to specify the same value for
          all spatial dimensions.
          Specifying any stride value != 1 is incompatible with specifying
          any `dilation_rate` value != 1.

  Input shape:
      4D tensor with shape:
      `(samples, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      4D tensor with shape:
      `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
      `rows` and `cols` values might have changed due to stride.

  Returns:
      Output tensor of block.
  """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    filters = int(filters * alpha)
    x = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
    x = Conv2D(filters,
               kernel,
               padding='valid',
               use_bias=False,
               strides=strides,
               name='conv1')(x)
    x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
    return Activation(relu6, name='conv1_relu')(x)
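An illustrative use of the width multiplier (values are hypothetical; `relu6` and the layer imports are assumed to be in scope as in the original module):

inputs = Input(shape=(224, 224, 3))
x = _conv_block(inputs, filters=32, alpha=0.5, strides=(2, 2))
# int(32 * 0.5) = 16 output channels; 224 -> 226 after padding, then
# (226 - 3) // 2 + 1 = 112 per spatial side.
print(x.shape)   # expected: (None, 112, 112, 16)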
Example #24
def create_vgg_ssd(num_classes, is_test=False, is_train=False):
    base_net = VGG16(input_shape=(config.image_size, config.image_size, 3),
                     include_top=False, weights=None)
    # Add extra SSD layers
    vgg_output = base_net.output
    x = MaxPool2D(pool_size=3, strides=1, padding="same")(vgg_output)
    x = Conv2D(filters=1024, kernel_size=3, padding="same", dilation_rate=(6, 6), activation='relu')(x)
    output = Conv2D(filters=1024, kernel_size=1, padding="same", activation='relu')(x)
    base_net = Model(inputs=base_net.inputs, outputs=output)

    source_layer_indexes = [
        (14, BatchNormalizationV2(epsilon=1e-5)),
        len(base_net.layers),
    ]
    extras = [
        [
            Conv2D(filters=256, kernel_size=1, padding='same', activation='relu'),
            Conv2D(filters=512, kernel_size=3, strides=2, padding="same", activation='relu'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, padding="same", activation='relu'),
            ZeroPadding2D(padding=1),
            Conv2D(filters=256, kernel_size=3, strides=2, activation='relu'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, padding='same', activation='relu'),
            Conv2D(filters=256, kernel_size=3, activation='relu'),
        ],
        [
            Conv2D(filters=128, kernel_size=1, padding='same', activation='relu'),
            Conv2D(filters=256, kernel_size=3, activation='relu'),
        ]
    ]

    regression_headers = [
        Conv2D(filters=4 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=6 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=4 * 4, kernel_size=3, padding="same"),
        Conv2D(filters=4 * 4, kernel_size=3, padding="same"),
        # TODO: change to kernel_size=1, padding=0?
    ]

    classification_headers = [
        Conv2D(filters=4 * num_classes, kernel_size=3, padding="same"),
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same"),
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same"),
        Conv2D(filters=6 * num_classes, kernel_size=3, padding="same"),
        Conv2D(filters=4 * num_classes, kernel_size=3, padding="same"),
        Conv2D(filters=4 * num_classes, kernel_size=3, padding="same"),
        # TODO: change to kernel_size=1, padding=0?
    ]

    return SSD(num_classes, base_net, source_layer_indexes,
               extras, classification_headers, regression_headers, is_test=is_test, config=config, is_train=is_train)
Example #25
def SepConv_BN(x,
               filters,
               prefix,
               stride=1,
               kernel_size=3,
               rate=1,
               depth_activation=False,
               epsilon=1e-3,
               regularizer_l1=0.0,
               regularizer_l2=0.0):
    """ SepConv with BN between depthwise & pointwise. Optionally add activation after BN
        Implements "same" padding correctly for even kernel sizes.
        Args:
            x: input tensor
            filters: num of filters in pointwise convolution
            prefix: prefix before name
            stride: stride at depthwise conv
            kernel_size: kernel size for depthwise convolution
            rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
            epsilon: epsilon to use in BN layer
    """

    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size),
                        strides=(stride, stride),
                        dilation_rate=(rate, rate),
                        padding=depth_padding,
                        use_bias=False,
                        name=prefix + '_depthwise',
                        kernel_regularizer=l1_l2(regularizer_l1,
                                                 regularizer_l2))(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               name=prefix + '_pointwise',
               kernel_regularizer=l1_l2(regularizer_l1, regularizer_l2))(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
Example #26
def create_tf_facedetection_mobilenetv2(size, alpha):
    input_tensor = Input(shape=(size, size, 3))
    output_tensor = MobileNetV2(weights=None,
                                include_top=False,
                                input_tensor=input_tensor,
                                alpha=alpha).output
    output_tensor = ZeroPadding2D()(output_tensor)
    output_tensor = Conv2D(kernel_size=(3, 3), filters=5)(output_tensor)

    return Model(inputs=input_tensor, outputs=output_tensor)
Example #27
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
                    DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
Example #28
def resblock_body(x, num_filters, num_blocks):
    # Padding so the strided convolution below can act as the pooling (downsampling) step
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    # Strided convolution used in place of a pooling layer
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
                    DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
Example #29
def ResBlock(x, num_filters, num_blocks):
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = YOLOConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)

    for i in range(num_blocks):
        y = compose(YOLOConv2D_BN_Leaky(num_filters // 2, (1, 1)),
                    YOLOConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])

    return x
Example #30
File: model.py  Project: EpiSci/M-OOD
def f_network_decoder(opt: 'parser'):
    """Conditional decoder
    :param opt: parser
    :return: keras Model
    """
    model = Sequential(name="f_decoder")
    # layer 4
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.nfeature, (3, 3), (1, 1), "valid"))
    model.add(BatchNormalization())
    model.add(ReLU())
    # layer 5
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.nfeature, (3, 3), (1, 1), "valid"))
    model.add(BatchNormalization())
    model.add(ReLU())
    # layer 6
    model.add(ZeroPadding2D(1))
    model.add(Conv2DTranspose(opt.n_out, (4, 4), (2, 2), "valid"))
    return model