def vgg_(in_shape=(227, 227, 3),
         n_classes=1000,
         opt='sgd',
         n_stages_per_blocks=None):
    """Build and compile a VGG-style classifier.

    Args:
        in_shape: Input image shape (H, W, C).
        n_classes: Number of units in the softmax output layer.
        opt: Optimizer name or instance passed to ``model.compile``.
        n_stages_per_blocks: Conv stages per block for the five blocks.
            Defaults to [2, 2, 3, 3, 3] (VGG-16 layout).

    Returns:
        A compiled Keras ``Model``.
    """
    # Resolve the default here instead of using a mutable default
    # argument, which would be shared (and mutable) across calls.
    if n_stages_per_blocks is None:
        n_stages_per_blocks = [2, 2, 3, 3, 3]

    in_layer = layers.Input(in_shape)

    # Five conv blocks, each followed by 2x2 max pooling.
    block1 = block(in_layer, 64, n_stages_per_blocks[0])
    pool1 = layers.MaxPool2D()(block1)
    block2 = block(pool1, 128, n_stages_per_blocks[1])
    pool2 = layers.MaxPool2D()(block2)
    block3 = block(pool2, 256, n_stages_per_blocks[2])
    pool3 = layers.MaxPool2D()(block3)
    block4 = block(pool3, 512, n_stages_per_blocks[3])
    pool4 = layers.MaxPool2D()(block4)
    block5 = block(pool4, 512, n_stages_per_blocks[4])
    pool5 = layers.MaxPool2D()(block5)
    flattened = layers.GlobalAvgPool2D()(pool5)

    # Classifier head: two 4096-unit dense layers, then softmax.
    dense1 = layers.Dense(4096, activation='relu')(flattened)
    dense2 = layers.Dense(4096, activation='relu')(dense1)
    preds = layers.Dense(n_classes, activation='softmax')(dense2)

    model = Model(in_layer, preds)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    return model
Пример #2
0
def _resnet(block, blocks_num, im_width=224, im_height=224, num_classes=2, include_top=True):
    """Build a ResNet-style Keras model.

    Args:
        block: Residual block type forwarded to `_make_layer`.
        blocks_num: Number of residual units in each of the four stages.
        im_width: Input image width.
        im_height: Input image height.
        num_classes: Output classes used when `include_top` is True.
        include_top: If True, append global pooling + softmax head;
            otherwise return the raw stage-4 feature map.

    Returns:
        A `Model` mapping the input image to predictions (or features).
    """
    # TensorFlow tensors use NHWC channel ordering.
    # Input shape: (None, 224, 224, 3)
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")
    # Stem: 7x7 stride-2 conv -> BN -> ReLU -> 3x3 stride-2 max pool.
    x = layers.Conv2D(filters=64, kernel_size=7, strides=2,
                      padding="SAME", use_bias=False, name="conv1")(input_image)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name="conv1/BatchNorm")(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME")(x)

    # Four residual stages; stages 2-4 downsample with stride 2.
    # x.shape[-1] passes the current channel count into each stage.
    x = _make_layer(block, x.shape[-1], 64, blocks_num[0], name="block1")(x)
    x = _make_layer(block, x.shape[-1], 128, blocks_num[1], strides=2, name="block2")(x)
    x = _make_layer(block, x.shape[-1], 256, blocks_num[2], strides=2, name="block3")(x)
    x = _make_layer(block, x.shape[-1], 512, blocks_num[3], strides=2, name="block4")(x)

    if include_top:
        x = layers.GlobalAvgPool2D()(x)  # pool + flatten
        x = layers.Dense(num_classes, name="logits")(x)
        predict = layers.Softmax()(x)
    else:
        predict = x

    model = Model(inputs=input_image, outputs=predict)

    return model
 def _build_model(self):
     """Attach a sigmoid classification head to the backbone.

     Concatenates global average- and max-pooled backbone features,
     adds a dense sigmoid layer with `self.n_classes` units and stores
     the assembled network in `self.model`.
     """
     backbone_model = self._get_backbone_model()
     # Fail fast when the configured backbone name is not registered.
     assert backbone_model is not None, f'backbone should be one of {list(_FEATURE_LAYERS.keys())}'
     # Combine both pooling views of the backbone's output feature map.
     x = layers.concatenate([layers.GlobalAvgPool2D()(backbone_model.output),
                             layers.GlobalMaxPool2D()(backbone_model.output)])
     o = layers.Dense(self.n_classes, activation='sigmoid', name='classification_output')(x)
     self.model = models.Model(inputs=backbone_model.input, outputs=o)
Пример #4
0
    def _cifar_resnet(input_shape,
                      num_classes,
                      normalization,
                      num_stacks,
                      mode,
                      name,
                      weight_decay=5e-4,
                      **kwargs):
        """Assemble a CIFAR ResNet with ``3 * num_stacks`` residual blocks.

        Args:
            input_shape: Shape of the network input (excluding batch).
            num_classes: Size of the softmax output layer.
            normalization: 'batchnorm' or 'unitization' (case-insensitive).
            num_stacks: Residual blocks per filter stage (16/32/64).
            mode: Forwarded to each `Block`.
            name: Prefix used for all layer names.
            weight_decay: L2 regularization factor.
            **kwargs: Extra options; `image_size` is tracked for the
                'unitization' normalization and shrunk after each stride.

        Returns:
            A `Km.Model` named `name`.

        Raises:
            ValueError: If `normalization` is not recognized.
        """
        if normalization.lower() not in ['batchnorm', 'unitization']:
            # ValueError is the precise category for a bad argument;
            # callers that caught the old broad Exception still catch it.
            raise ValueError('Unknown normalization: {}'.format(normalization))

        if normalization.lower() == 'batchnorm':
            kwargs.pop('image_size', None)
        else:
            # Unitization tracks the pixel count; CIFAR images are 32x32.
            kwargs.setdefault('image_size', 32**2)

        inputs = Kl.Input(input_shape, name=name + '-inputs')

        tensor = inputs
        tensor = get_convolutional_layer(16, (3, 3), (1, 1),
                                         name + '-pre-conv',
                                         weight_decay)(tensor)

        tensor = ResNets._norm_relu(tensor, normalization, name + '-pre-',
                                    **kwargs)

        # Three stages of `num_stacks` blocks; the first block of stages
        # two and three downsamples with stride 2.
        filters = [16] * num_stacks \
            + [32] * num_stacks \
            + [64] * num_stacks
        strides = [(1, 1)] * num_stacks \
            + [(2, 2)] + [(1, 1)] * (num_stacks - 1) \
            + [(2, 2)] + [(1, 1)] * (num_stacks - 1)

        Block._block_count = 0
        # `n_filters` avoids shadowing the builtin `filter`; `filter=` is
        # kept in the call because that is Block's keyword argument.
        for n_filters, stride in zip(filters, strides):
            tensor = Block(filter=n_filters,
                           stride=stride,
                           mode=mode,
                           normalization=normalization,
                           weight_decay=weight_decay,
                           model_name=name)(tensor, **kwargs)
            if 'image_size' in kwargs:
                # Keep the tracked pixel count in sync with downsampling.
                if isinstance(stride, int):
                    kwargs['image_size'] //= stride
                else:
                    kwargs['image_size'] //= np.prod(stride)

        tensor = ResNets._norm_relu(tensor, normalization, name + '-fin-',
                                    **kwargs)
        tensor = Kl.GlobalAvgPool2D(name=name + '-fin-pool')(tensor)
        outputs = Kl.Dense(num_classes,
                           activation='softmax',
                           kernel_initializer='he_normal',
                           kernel_regularizer=Kr.l2(weight_decay),
                           name=name + '-outputs')(tensor)

        return Km.Model(inputs, outputs, name=name)
Пример #5
0
    def __init__(self, classes):
        """Declare the layers of a small ResNet classifier.

        The forward pass (``call``) is defined elsewhere; this only
        creates the sublayers.

        Args:
            classes: Number of units in the softmax classifier.
        """
        super(ResNet, self).__init__()
        # Stem layers: 7x7 conv -> BN -> ReLU -> 3x3 max pool.
        self.conv7 = layers.Conv2D(64, 7, padding='same', name='conv7')
        self.bn = layers.BatchNormalization()
        self.act = layers.Activation('relu')
        self.max_pool = layers.MaxPool2D((3, 3))

        # Two residual identity blocks, 64 filters, kernel size 3.
        self.ib1 = IdentityBlock(64, 3, name='ID1')
        self.ib2 = IdentityBlock(64, 3, name='ID2')

        # Head: global average pooling then softmax over `classes`.
        self.global_pool = layers.GlobalAvgPool2D()
        self.classifier = layers.Dense(classes, activation='softmax')
Пример #6
0
def resnet50_head(features):
    """Stage-5 head of a ResNet50: one conv block, two identity blocks,
    then a per-ROI global average pool.

    NOTE(review): `features` appears to be a 5-D tensor
    (batch, roi_num, H, W, C) given the `_5d` helpers and the
    TimeDistributed pooling — confirm against the caller.
    """
    base_width = 512
    stage5_filters = [base_width, base_width, base_width * 4]

    out = conv_block_5d(features,
                        3, stage5_filters,
                        stage=5,
                        block='a',
                        strides=(2, 2))
    # Two identity blocks with the same filter layout.
    for block_id in ('b', 'c'):
        out = identity_block_5d(out,
                                3, stage5_filters,
                                stage=5,
                                block=block_id)

    # Global average pooling per ROI -> (batch_size, roi_num, channels).
    return layers.TimeDistributed(layers.GlobalAvgPool2D())(out)
Пример #7
0
    def build(self, input_shape):
        """Create the sublayers once the input channel count is known.

        Raises:
            ValueError: If the channel dimension is undefined.
        """
        self.channels = input_shape[-1]
        if self.channels is None:
            raise ValueError(
                'Channel dimension of the inputs should be defined. Found `None`.'
            )

        # Global context branch: spatial average kept 4-D (keepdims),
        # batch-normalized, then Dropout with rate `self.confidence`.
        self.avg = models.Sequential([
            layers.GlobalAvgPool2D(keepdims=True),
            layers.BatchNormalization(),  # TODO: not fuse?
            layers.Dropout(self.confidence)
        ])
        # 1x1 conv producing a stacked tensor of 3x the channel count
        # (presumably query/key/value given the name — confirm in call()).
        self.qkv = SameConv(self.channels * 3, 1, use_bias=False)
        # Sigmoid-activated 1x1 projection back to the input channel count.
        self.proj = SameConv(self.channels,
                             1,
                             use_bias=False,
                             activation='sigmoid')

        super().build(input_shape)
Пример #8
0
def inception_net(in_shape=(224, 224, 3), n_classes=1000, opt='sgd'):
    """Build and compile a GoogLeNet-style Inception network with two
    auxiliary classifier outputs.

    Args:
        in_shape: Input image shape (H, W, C).
        n_classes: Number of units in the main softmax output.
        opt: Optimizer name or instance passed to ``model.compile``.

    Returns:
        A compiled ``Model`` with outputs ``[preds, aux_clf1, aux_clf2]``.
    """
    in_layer = layers.Input(in_shape)

    # Stem: 7x7/2 conv, pool, 1x1 + 3x3 convs, pool.
    conv1 = layers.Conv2D(64, 7, strides=2, activation='relu',
                          padding='same')(in_layer)
    pad1 = layers.ZeroPadding2D()(conv1)
    pool1 = layers.MaxPool2D(3, 2)(pad1)
    conv2_1 = conv1x1(64)(pool1)
    conv2_2 = conv3x3(192)(conv2_1)
    pad2 = layers.ZeroPadding2D()(conv2_2)
    pool2 = layers.MaxPool2D(3, 2)(pad2)

    # Inception stage 3.
    inception3a = inception_module(pool2, 64, 96, 128, 16, 32, 32)
    inception3b = inception_module(inception3a, 128, 128, 192, 32, 96, 64)
    pad3 = layers.ZeroPadding2D()(inception3b)
    pool3 = layers.MaxPool2D(3, 2)(pad3)

    # Inception stage 4.
    inception4a = inception_module(pool3, 192, 96, 208, 16, 48, 64)
    inception4b = inception_module(inception4a, 160, 112, 224, 24, 64, 64)
    inception4c = inception_module(inception4b, 128, 128, 256, 24, 64, 64)
    inception4d = inception_module(inception4c, 112, 144, 288, 32, 48, 64)
    inception4e = inception_module(inception4d, 256, 160, 320, 32, 128, 128)
    pad4 = layers.ZeroPadding2D()(inception4e)
    pool4 = layers.MaxPool2D(3, 2)(pad4)

    # Auxiliary classifiers branch off the middle of stage 4.
    aux_clf1 = aux_clf(inception4a)
    aux_clf2 = aux_clf(inception4d)

    # Inception stage 5.
    inception5a = inception_module(pool4, 256, 160, 320, 32, 128, 128)
    inception5b = inception_module(inception5a, 384, 192, 384, 48, 128, 128)
    pad5 = layers.ZeroPadding2D()(inception5b)
    pool5 = layers.MaxPool2D(3, 2)(pad5)

    avg_pool = layers.GlobalAvgPool2D()(pool5)
    dropout = layers.Dropout(0.4)(avg_pool)
    # Bug fix: the output layer previously hard-coded 1000 units,
    # silently ignoring the `n_classes` parameter.
    preds = layers.Dense(n_classes, activation='softmax')(dropout)

    model = Model(in_layer, [preds, aux_clf1, aux_clf2])
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    return model
Пример #9
0
 def __init__(self):
     """Declare the sublayers of a channel-attention module: global
     max/avg pooling feeding a two-layer dense bottleneck, gated by a
     sigmoid.

     NOTE(review): `self.channel` is hard-coded to 128 — confirm it
     matches the channel count of the actual inputs.
     """
     super(Channel_attention, self).__init__()
     # Bottleneck ratio for the first dense layer (128 * 0.125 = 16 units).
     self.reduction_ratio = 0.125
     # Channel axis depends on the backend image data format.
     self.channel_axis = 1 if K.image_data_format(
     ) == "channels_first" else 3
     self.maxpool = KL.GlobalMaxPooling2D()
     self.avgpool = KL.GlobalAvgPool2D()
     self.sigmoid = KL.Activation('sigmoid')
     self.channel = 128
     # Squeeze: project channels down to channel * reduction_ratio units.
     self.Dense_One = KL.Dense(units=int(self.channel *
                                         self.reduction_ratio),
                               activation='relu',
                               kernel_initializer='he_normal',
                               use_bias=True,
                               bias_initializer='zeros')
     # Excite: project back up to the full channel count.
     self.Dense_Two = KL.Dense(units=int(self.channel),
                               activation='relu',
                               kernel_initializer='he_normal',
                               use_bias=True,
                               bias_initializer='zeros')
Пример #10
0
def cifar_base_net(inputs):
    """Feature extractor for CIFAR-sized images: two conv/pool/dropout
    stacks, a wide final conv, then global average pooling.

    Returns the pooled feature tensor (batch, 512).
    """
    t = inputs
    # Two stacks: (filters, first conv name, second conv name, use BN).
    # Layer names are preserved exactly so saved weights still map.
    for width, first_name, second_name, with_bn in (
            (32, 'conv_1', 'conv_2', True),
            (64, 'conv_3', 'conv_4', False)):
        t = layers.Conv2D(width, (3, 3), padding='same', name=first_name)(t)
        if with_bn:
            # Only the very first conv is batch-normalized.
            t = layers.BatchNormalization(axis=3, name='bn_conv1')(t)
        t = layers.Activation('relu')(t)
        t = layers.Conv2D(width, (3, 3), name=second_name)(t)
        t = layers.Activation('relu')(t)
        t = layers.MaxPooling2D(pool_size=(2, 2))(t)
        t = layers.Dropout(0.25)(t)

    # Wide final conv, then pool spatial dims away.
    t = layers.Conv2D(512, (3, 3), name='conv_5')(t)
    t = layers.Activation('relu')(t)
    return layers.GlobalAvgPool2D()(t)
Пример #11
0
    def build(self, input_shape):
        """Create the attention branch and the pointwise projection once
        the channel count of the 4-D input is known.

        Raises:
            ValueError: If the channel dimension is undefined.
        """
        num_channels = input_shape[-1]
        if num_channels is None:
            raise ValueError(
                'Channel dimension of the inputs should be defined. Found `None`.'
            )
        self.input_spec = layers.InputSpec(ndim=4, axes={-1: num_channels})

        # Squeeze spatial dims to 1x1 (kept 4-D), then a sigmoid 1x1 conv
        # produces per-channel attention weights.
        squeeze = layers.GlobalAvgPool2D(keepdims=True)
        excite = SameConv(num_channels,
                          1,
                          activation='sigmoid',
                          use_bias=False,
                          kernel_initializer='he_uniform')
        self.attend = models.Sequential([squeeze, excite])

        # Pointwise projection to the layer's configured filter count.
        self.conv = SameConv(self.filters,
                             1,
                             use_bias=False,
                             kernel_initializer='he_uniform')

        super().build(input_shape)
def _post_res_blocks(in_tensor, n_classes):
    """Classification head applied after the residual blocks: global
    average pooling followed by a softmax dense layer."""
    pooled = layers.GlobalAvgPool2D()(in_tensor)
    return layers.Dense(n_classes, activation='softmax')(pooled)
    layers.SeparableConv2D(32,
                           3,
                           activation='relu',
                           input_shape=(
                               height,
                               width,
                               channels,
                           )))
# Depthwise-separable conv classifier body. `model`, `num_classes` (and
# the height/width/channels used above) are presumably defined earlier
# in the script — confirm against the full file.
model.add(layers.SeparableConv2D(64, 3, activation='relu'))
model.add(layers.MaxPool2D(2))
model.add(layers.SeparableConv2D(64, 3, activation='relu'))
model.add(layers.SeparableConv2D(128, 3, activation='relu'))
model.add(layers.MaxPool2D(2))
model.add(layers.SeparableConv2D(64, 3, activation='relu'))
model.add(layers.SeparableConv2D(128, 3, activation='relu'))
# Global average pooling stands in for Flatten before the dense head.
model.add(layers.GlobalAvgPool2D())
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(num_classes, activation='softmax'))
model.summary()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'])

#%% 超参数优化问题
# 可以尝试使用Python的Hyperopt库或Hyperas库,后者将Hyperopt与Keras模型集成

#%% 模型集成(model ensembling)
# 将一组分类器结果汇集在一起,即分类器集成(ensemble the classifiers),可以得到
# 好于所有模型的结果,最简单的方式是取平均值,更聪明的方式是加权平均
# (采用Nelder-Mead方法优化)
# 另一种集成方式是宽且深(wide and deep)的模型类型,它集合了深度学习和浅层学习
Пример #14
0
#	horizontal_flip=True, fill_mode="nearest")

# Small CNN: five stride-2 conv/BN stages (doubling filters each stage),
# global average pooling, noise + dropout regularization, 2-way softmax.
input_shape = (224, 224)
model_input_shape = (224, 224, 3)
model_input = Input(shape=model_input_shape, name='image')

x = model_input
# Each stage halves the spatial resolution; the fixed seed keeps the
# he_normal initialization reproducible, exactly as in the original.
for stage_filters in (32, 64, 128, 256, 512):
    x = layers.Conv2D(filters=stage_filters,
                      kernel_size=(3, 3),
                      strides=2,
                      padding='same',
                      activation='relu',
                      use_bias=False,
                      kernel_initializer=he_normal(seed=426))(x)
    x = layers.BatchNormalization()(x)

x = layers.GlobalAvgPool2D()(x)
x = layers.GaussianNoise(0.1)(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(2, activation='softmax', name='top')(x)

model = Model(model_input, x)
model.summary()
model.compile(adam(), loss='binary_crossentropy')


"""
ABC-Mart....
"""

from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
log_dir = 'logs/oc_cnn/'