def train_test_model(logdir, hparams):
    filter_kernel_2 = json.loads(hparams['filter_kernel_2'])
    
    inputs = keras.Input(shape=(x_train[0].shape[0], x_train[0].shape[1], 1), name='c3_input')
    model = layers.Conv2D(filters=int(hparams['filter_1']), kernel_size=int(hparams['kernel_1']), activation='relu')(inputs)
    model = layers.MaxPooling2D(pool_size=2)(model)
    model = layers.Dropout(0.4)(model)
    if int(filter_kernel_2[0]) > 0:
        model = layers.Conv2D(filters=int(filter_kernel_2[0]), kernel_size=int(filter_kernel_2[1]), activation='relu')(model)
        model = layers.MaxPooling2D(pool_size=2)(model)
        model = layers.Dropout(0.4)(model)
    model = layers.GlobalAvgPool2D()(model)
    model = layers.Dense(int(hparams['units_1']), activation='relu')(model)
    if int(hparams['units_2']) > 0:
        model = layers.Dense(int(hparams['units_2']), activation='relu')(model)
    model = layers.Dense(1, activation='sigmoid')(model)
    model = Model(inputs=inputs, outputs=model, name='c3_model')

    model.compile(optimizer=optimizers.Adam(learning_rate=hparams['lr'], decay=0.001), loss='binary_crossentropy', metrics=['accuracy'])

    cb = [
        callbacks.TensorBoard(log_dir=logdir)
    ]
    history = model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=64, epochs=1000, callbacks=cb, verbose=0)
    return model, history
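A minimal usage sketch with hypothetical values (train_test_model reads x_train/x_test/y_train/y_test from module scope, and 'filter_kernel_2' is a JSON-encoded [filters, kernel] pair where 0 filters skips the second conv block):

hparams = {
    'filter_1': 32, 'kernel_1': 3,
    'filter_kernel_2': '[64, 3]',   # '[0, 0]' would skip the second conv block
    'units_1': 64, 'units_2': 0,    # 0 skips the second dense layer
    'lr': 1e-3,
}
model, history = train_test_model('logs/run_1', hparams)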
def _net(block,
         im_width=9,
         im_height=9,
         im_channel=204,
         num_classes=16,
         include_top=True):
    # TensorFlow tensors use NHWC channel ordering
    # (None, 9, 9, 204)
    input_image = layers.Input(shape=(im_height, im_width, im_channel),
                               dtype="float32")
    # x = layers.Conv2D(filters=64, kernel_size=7, strides=2, padding="SAME", use_bias=False, name="conv1")(input_image)
    x = block()(input_image)
    # print("include_top", include_top)

    if include_top:
        x = layers.GlobalAvgPool2D()(x)  # pool + flatten

        x = layers.Dense(256, activation='relu')(x)
        x = layers.Dropout(0.5)(x)
        x = layers.Dense(512, activation='relu')(x)
        x = layers.Dropout(0.5)(x)

        predict = layers.Dense(num_classes, activation='softmax')(x)
    else:
        predict = x

    model = Model(inputs=input_image, outputs=predict)

    return model
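A usage sketch under the assumption that `block` is any zero-argument callable returning a Keras layer (the 9x9x204 defaults come from the signature above; `simple_block` is hypothetical):

def simple_block():
    return layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu")

model = _net(simple_block)
model.summary()  # (None, 9, 9, 204) -> ... -> (None, 16)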
Example #3
def channel_attention(input_xs, reduction_ratio):  # input_xs (None,50,50,64)
    # detect the data format: channels_first or channels_last
    channel_axis = 1 if k.image_data_format() == "channels_first" else 3
    # channel count (3 for a plain RGB image; 64 for this feature map)
    channel = int(input_xs.shape[channel_axis])  # 64

    maxpool_channel = kl.GlobalMaxPooling2D()(input_xs)  # (None,64) global max pooling
    maxpool_channel = kl.Reshape((1, 1, channel))(maxpool_channel)  # ( None,1,1,64)

    avgpool_channel = kl.GlobalAvgPool2D()(input_xs)  # (None,64) global average pooling
    avgpool_channel = kl.Reshape((1, 1, channel))(avgpool_channel)  # (None,1,1,64)
    # the two Dense layers are shared between the max and avg paths
    dense_one = kl.Dense(units=int(channel * reduction_ratio), activation='relu', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros')
    dense_two = kl.Dense(units=int(channel), activation='relu', kernel_initializer='he_normal', use_bias=True, bias_initializer='zeros')
    # max path
    mlp_1_max = dense_one(maxpool_channel)  # (None,1,1,32)
    mlp_2_max = dense_two(mlp_1_max)  # (None,1,1,64)
    mlp_2_max = kl.Reshape(target_shape=(1, 1, int(channel)))(mlp_2_max)  # (None,1,1,64)
    # avg path
    mlp_1_avg = dense_one(avgpool_channel)  # (None,1,1,32)
    mlp_2_avg = dense_two(mlp_1_avg)  # (None,1,1,64)
    mlp_2_avg = kl.Reshape(target_shape=(1, 1, int(channel)))(mlp_2_avg)  # (None,1,1,64)

    channel_attention_feature = kl.Add()([mlp_2_max, mlp_2_avg])  # (None,1,1,64)
    channel_attention_feature = kl.Activation('sigmoid')(channel_attention_feature)  # (None,1,1,64)

    multiply_channel_input = kl.Multiply()([channel_attention_feature, input_xs])  # (None,50,50,64)

    return multiply_channel_input
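A hedged usage sketch (assumes `k` aliases tensorflow.keras.backend and `kl` aliases tensorflow.keras.layers; note that reduction_ratio here is a multiplier, so 0.5 gives the usual 64 -> 32 -> 64 bottleneck):

inputs = kl.Input(shape=(50, 50, 64))
attended = channel_attention(inputs, reduction_ratio=0.5)
model = tf.keras.Model(inputs, attended)  # output: (None, 50, 50, 64)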
Example #4
def _resnet(block, blocks_num, im_width=224, im_height=224, num_classes=1000, include_top=True):
    # TensorFlow tensors use NHWC channel ordering
    # (None, 224, 224, 3)
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")
    x = layers.Conv2D(filters=64, kernel_size=7, strides=2,
                      padding="SAME", use_bias=False, name="conv1")(input_image)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name="conv1/BatchNorm")(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME")(x)

    x = _make_layer(block, x.shape[-1], 64, blocks_num[0], name="block1")(x)
    x = _make_layer(block, x.shape[-1], 128, blocks_num[1], strides=2, name="block2")(x)
    x = _make_layer(block, x.shape[-1], 256, blocks_num[2], strides=2, name="block3")(x)
    x = _make_layer(block, x.shape[-1], 512, blocks_num[3], strides=2, name="block4")(x)

    if include_top:
        x = layers.GlobalAvgPool2D()(x)  # pool + flatten
        x = layers.Dense(num_classes, name="logits")(x)
        predict = layers.Softmax()(x)
    else:
        predict = x

    model = Model(inputs=input_image, outputs=predict)

    return model
Example #5
    def __init__(self, num_videos, finetuning=False, e_finetuning=None):
        super(Discriminator, self).__init__()
        self.relu = layers.LeakyReLU()
        self.num_videos = num_videos

        # in 6*224*224
        # self.pad = Padding(224)  # out 256*256*6
        self.resDown1 = ResBlockDown(6, 64)  # out 128*128*64
        self.resDown2 = ResBlockDown(64, 128)  # out 64*64*128
        self.resDown3 = ResBlockDown(128, 256)  # out 32*32*256
        self.self_att = SelfAttention(256)  # out 32*32*256
        self.resDown4 = ResBlockDown(256, 512)  # out 16*16*512
        self.resDown5 = ResBlockDown(512, 512)  # out 8*8*512
        self.resDown6 = ResBlockDown(512, 512)  # out 4*4*512
        self.res = ResBlockD(512)  # out 4*4*512
        # self.sum_pooling = nn.AdaptiveAvgPool2d((1, 1))  # out 1*1*512
        self.sum_pooling = layers.GlobalAvgPool2D()  # out 1*1*512

        # if not finetuning:
        #     print('Initializing Discriminator weights')
        #     if not os.path.isdir(self.path_to_Wi):
        #         os.mkdir(self.path_to_Wi)
        #     for i in tqdm(range(num_videos)):
        #         if not os.path.isfile(self.path_to_Wi + '/W_' + str(i) + '/W_' + str(i) + '.tar'):
        #             w_i = torch.rand(512, 1)
        #             os.mkdir(self.path_to_Wi + '/W_' + str(i))
        #             torch.save({'W_i': w_i}, self.path_to_Wi + '/W_' + str(i) + '/W_' + str(i) + '.tar')
        self.finetuning = finetuning
        self.e_finetuning = e_finetuning
Example #6
 def __init__(self, label_num):
     super(DenseNet, self).__init__()
     channel_num = 64
     self.layers_lst = []  # layers are collected here and applied in order by call()
     # first component
     self.layers_lst.extend([
         layers.Conv2D(filters=channel_num,
                       kernel_size=7,
                       strides=2,
                       padding='same'),
         layers.BatchNormalization(),
         layers.MaxPool2D(pool_size=3, strides=2, padding='same')
     ])
     # second: dense and transition block
     growth_rate = 32
     num_conv_in_dense_block = [4, 4, 4, 4]
     for i, num_conv in enumerate(num_conv_in_dense_block):
         self.layers_lst.append(
             DenseBlock(num_channel=growth_rate, num_conv=num_conv))
         channel_num += num_conv * growth_rate
         if i != len(num_conv_in_dense_block) - 1:
             channel_num = channel_num // 2
             self.layers_lst.append(
                 TransitionBlock(num_channel=channel_num))
     # third: final component
     self.layers_lst.extend([
         layers.BatchNormalization(),
         layers.Activation('relu'),
         layers.GlobalAvgPool2D(),
         layers.Dense(units=label_num)
     ])
Example #7
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 num_layers=1,
                 data_format="channels_last",
                 **kwargs):
        super(ChannelGate, self).__init__(**kwargs)
        self.data_format = data_format
        mid_channels = channels // reduction_ratio

        self.pool = nn.GlobalAvgPool2D(data_format=data_format, name="pool")
        self.flatten = nn.Flatten()
        self.init_fc = DenseBlock(in_channels=channels,
                                  out_channels=mid_channels,
                                  data_format=data_format,
                                  name="init_fc")
        self.main_fcs = SimpleSequential(name="main_fcs")
        for i in range(num_layers - 1):
            self.main_fcs.children.append(
                DenseBlock(in_channels=mid_channels,
                           out_channels=mid_channels,
                           data_format=data_format,
                           name="fc{}".format(i + 1)))
        self.final_fc = nn.Dense(units=channels,
                                 input_dim=mid_channels,
                                 name="final_fc")
Example #8
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 ordinary_init=False,
                 bends=None,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNetD, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.multi_output = (bends is not None)
        self.data_format = data_format

        self.features = MultiOutputSequential(name="features")
        if ordinary_init:
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                data_format=data_format,
                name="init_block"))
        else:
            init_block_channels = 2 * init_block_channels
            self.features.add(SEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                data_format=data_format,
                name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                strides = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1
                dilation = (2 ** max(0, i - 1 - int(j == 0)))
                stage.add(ResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=dilation,
                    dilation=dilation,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            if self.multi_output and ((i + 1) in bends):
                stage.do_output = True
            self.features.add(stage)
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
Example #9
def build_model(x, y):

    nx = np.empty((len(x), 28, 28, 1))

    for i in range(len(x)):
        nx[i] = x[i].reshape((28, 28, 1))

    sigma_function = 'relu'

    model = keras.Sequential([
        keras.Input(shape=(28, 28, 1)),
        # 28*28*1 -> 28*28*8
        layers.Conv2D(8, 2, (1, 1), padding='same', activation=sigma_function),
        layers.LayerNormalization(),

        # 28*28*8 -> 14*14*16 (stride 2, same padding)
        layers.Conv2D(16, 2, (2, 2), padding='same',
                      activation=sigma_function),
        layers.LayerNormalization(),

        # 14*14*16 -> 14*14*32
        layers.Conv2D(32, 2, (1, 1), padding='same',
                      activation=sigma_function),
        layers.LayerNormalization(),
        layers.GlobalAvgPool2D(),
        layers.Dense(10, bias_initializer='ones', activation="softmax"),
    ])
    model.summary()
    adam = tf.keras.optimizers.Adam()
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model, nx, y
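A usage sketch with MNIST (build_model reshapes the images into (28, 28, 1) itself and returns the ready-to-fit arrays):

(x_raw, y_raw), _ = tf.keras.datasets.mnist.load_data()
y_onehot = tf.keras.utils.to_categorical(y_raw, 10)
model, nx, ny = build_model(x_raw / 255.0, y_onehot)
model.fit(nx, ny, batch_size=64, epochs=1)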
Example #10
def _resnet(block,
            blocks_num,
            im_width=224,
            im_height=224,
            num_classes=1000,
            include_top=True):
    # block: pass BasicBlock for the 18/34-layer nets, Bottleneck otherwise
    # blocks_num: list of residual-unit counts per stage (see the ResNet table), e.g. [2, 2, 2, 2] for ResNet-18
    # im_width/im_height: input size; num_classes: number of classes
    # TensorFlow tensors use NHWC channel ordering
    # (None, 224, 224, 3)
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")
    x = layers.Conv2D(filters=64,
                      kernel_size=7,
                      strides=2,
                      padding="SAME",
                      use_bias=False,
                      name="conv1")(input_image)
    x = layers.BatchNormalization(momentum=0.9,
                                  epsilon=1e-5,
                                  name="conv1/BatchNorm")(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME")(x)

    # conv1 plus the first max-pool are done at this point

    # each _make_layer call builds one full residual stage
    # x.shape is the previous output shape [batch, height, width, channel], so x.shape[-1] is the channel count
    # third argument: channel count of the stage's first conv kernel; fourth: number of residual units in the stage
    x = _make_layer(block, x.shape[-1], 64, blocks_num[0], name="block1")(x)
    x = _make_layer(block,
                    x.shape[-1],
                    128,
                    blocks_num[1],
                    strides=2,
                    name="block2")(x)
    x = _make_layer(block,
                    x.shape[-1],
                    256,
                    blocks_num[2],
                    strides=2,
                    name="block3")(x)
    x = _make_layer(block,
                    x.shape[-1],
                    512,
                    blocks_num[3],
                    strides=2,
                    name="block4")(x)

    if include_top:
        x = layers.GlobalAvgPool2D()(x)  # global average pool + flatten
        x = layers.Dense(num_classes, name="logits")(x)  # fully connected
        predict = layers.Softmax()(x)  # convert logits to a probability distribution
    else:
        predict = x

    model = Model(inputs=input_image, outputs=predict)

    return model
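For example, a ResNet-34 head with five classes (assuming a BasicBlock class and the _make_layer helper referenced above are defined elsewhere in the same file):

model = _resnet(BasicBlock, [3, 4, 6, 3], num_classes=5)
model.summary()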
Example #11
    def __init__(self, data_format="channels_last", **kwargs):
        super(GlobalAvgMaxPool2D, self).__init__(**kwargs)
        self.axis = get_channel_axis(data_format)

        self.avg_pool = nn.GlobalAvgPool2D(data_format=data_format,
                                           name="avg_pool")
        self.max_pool = nn.GlobalMaxPool2D(data_format=data_format,
                                           name="max_pool")
Example #12
def Squeeze_Excitation_Module(input, filters, reduction_ratio, name="SE"):

    sq = layers.GlobalAvgPool2D(name=name+"_Squeeze")(input)
    ex1 = layers.Dense(filters//reduction_ratio, activation='relu', name=name+"_Excitation_1")(sq)
    ex2 = layers.Dense(filters, activation='sigmoid', name=name+"_Excitation_2")(ex1)
    ex = layers.Reshape([1, 1, filters], name=name+"_Reshape")(ex2)

    out = layers.Multiply(name=name+"_Multiply")([input, ex])
    
    return out
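A usage sketch; `filters` must match the channel count of `input` so the Multiply broadcast lines up:

inputs = layers.Input(shape=(56, 56, 128))
out = Squeeze_Excitation_Module(inputs, filters=128, reduction_ratio=16, name="SE1")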
Example #13
def CNN(num_classes=10, **kwargs):
    model = tf.keras.Sequential([
        layers.Conv2D(64, 3, 2, 'same', activation=tf.nn.relu, **kwargs),
        layers.Conv2D(64, 3, 1, 'same', activation=tf.nn.relu, **kwargs),
        layers.Conv2D(128, 3, 2, 'same', activation=tf.nn.relu, **kwargs),
        layers.Conv2D(128, 3, 1, 'same', activation=tf.nn.relu, **kwargs),
        layers.Conv2D(num_classes, 3, 1, 'same', **kwargs),
        layers.GlobalAvgPool2D()
    ])
    return model
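The Sequential here has no Input layer, so the spatial size stays free until the first call; a usage sketch:

model = CNN(num_classes=10)
model.build(input_shape=(None, 32, 32, 3))
model.summary()  # final GlobalAvgPool2D reduces (None, 8, 8, 10) to (None, 10) class logits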
Example #14
def build_vgg16(MyConv2d, ishape, nclass, name):
    input = KL.Input(ishape)  # 32

    x = KL.Conv2D(64, (3, 3),
                  strides=(1, 1),
                  padding='same',
                  activation='relu',
                  kernel_initializer='he_normal',
                  name='block1_conv1')(input)
    x = MyConv2d(64, (3, 3), name='block1_conv2')(x)
    x = KL.MaxPooling2D((2, 2),
                        strides=(2, 2),
                        padding='same',
                        name='block1_pool')(x)

    x = MyConv2d(128, (3, 3), name='block2_conv1')(x)
    x = MyConv2d(128, (3, 3), name='block2_conv2')(x)
    x = KL.MaxPooling2D((2, 2),
                        strides=(2, 2),
                        padding='same',
                        name='block2_pool')(x)

    x = MyConv2d(256, (3, 3), name='block3_conv1')(x)
    x = MyConv2d(256, (3, 3), name='block3_conv2')(x)
    x = MyConv2d(256, (3, 3), name='block3_conv3')(x)
    x = KL.MaxPooling2D((2, 2),
                        strides=(2, 2),
                        padding='same',
                        name='block3_pool')(x)

    x = MyConv2d(512, (3, 3), name='block4_conv1')(x)
    x = MyConv2d(512, (3, 3), name='block4_conv2')(x)
    x = MyConv2d(512, (3, 3), name='block4_conv3')(x)
    x = KL.MaxPooling2D((2, 2),
                        strides=(2, 2),
                        padding='same',
                        name='block4_pool')(x)  # 2

    x = MyConv2d(512, (3, 3), name='block5_conv1')(x)
    x = MyConv2d(512, (3, 3), name='block5_conv2')(x)
    x = MyConv2d(512, (3, 3), name='block5_conv3')(x)
    x = KL.MaxPooling2D((2, 2),
                        strides=(2, 2),
                        padding='same',
                        name='block5_pool')(x)  # 2*2 -> 1*1

    x = KL.GlobalAvgPool2D()(x)

    # output = KL.Dense(nclass, activation='softmax', name='predictions')(x)
    # XXX in case `mixed_precision` is being used, keep the softmax in float32
    output = KL.Dense(nclass, activation=None, name='predictions')(x)
    output = KL.Activation('softmax', dtype='float32')(output)
    kmdl = KM.Model(input, output, name=name)

    return kmdl
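A usage sketch; MyConv2d can be any Conv2D-compatible factory, e.g. a functools.partial that bakes in the shared arguments (a hypothetical drop-in, not the original caller's layer):

import functools
MyConv2d = functools.partial(KL.Conv2D, strides=(1, 1), padding='same',
                             activation='relu', kernel_initializer='he_normal')
model = build_vgg16(MyConv2d, ishape=(32, 32, 3), nclass=10, name='vgg16')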
Example #15
def mobilenetv1(shape, num_classes, activation="relu", alpha=1.0):
    """Obtain a mobilenet V1 model
    Args:
        shape: the shape of the input tensor
        num_classes: the number of outputs
        activation: the activation function of each later
        alpha: hyper parameter for adjusting the width of convolution layers
    """
    model = models.Sequential()
    model.add(layers.InputLayer(input_shape=shape))

    def add_conv_block(channels, strides, kernel_size=3):
        channels = int(channels * alpha)
        model.add(
            layers.Conv2D(
                channels, kernel_size=kernel_size, strides=strides,
                use_bias=False, padding="same"
            )
        )
        model.add(layers.BatchNormalization())
        model.add(layers.Activation(activation))

    def add_dw_sep_block(channels, strides):
        channels = int(channels * alpha)
        model.add(
            layers.DepthwiseConv2D(
                kernel_size=3, strides=strides, use_bias=False, padding="same"
            )
        )
        model.add(layers.BatchNormalization())
        model.add(layers.Activation(activation))
        add_conv_block(channels, strides=1, kernel_size=1)

    add_conv_block(32, 2)

    model_shapes_channel_strides = [
        (64, 1),
        (128, 2),
        (128, 1),
        (256, 2),
        (256, 1),
        (512, 2),
        *[(512, 1) for _ in range(5)],
        (1024, 2),
        (1024, 1),  # final block is stride 1 in MobileNet V1
    ]

    for c, s in model_shapes_channel_strides:
        add_dw_sep_block(c, s)

    model.add(layers.GlobalAvgPool2D())
    model.add(layers.Dense(1000, activation=activation))
    model.add(layers.Dense(num_classes, activation="softmax", name="softmax"))

    return model
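A usage sketch; alpha thins every convolution, so alpha=0.5 roughly quarters the multiply-adds:

model = mobilenetv1((224, 224, 3), num_classes=1000, alpha=0.5)
model.summary()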
Example #16
 def __init__(self, num_blocks, **kwargs):
     super(ResNet, self).__init__(**kwargs)
     self.conv = layers.Conv2D(64, kernel_size=7, strides=2, padding='same')
     self.bn = layers.BatchNormalization()
     self.relu = layers.Activation('relu')
     self.mp = layers.MaxPool2D(pool_size=3, strides=2, padding='same')
     self.resnet_block1 = ResnetBlock(64, num_blocks[0], first_block=True)
     self.resnet_block2 = ResnetBlock(128, num_blocks[1])
     self.resnet_block3 = ResnetBlock(256, num_blocks[2])
     self.resnet_block4 = ResnetBlock(512, num_blocks[3])
     self.gap = layers.GlobalAvgPool2D()
     self.fc = layers.Dense(units=10, activation=activations.softmax)
Example #17
def get_training_model():
    model = tf.keras.Sequential([
        layers.Conv2D(16, (5, 5), activation="relu", input_shape=(28, 28, 1)),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(32, (5, 5), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Dropout(0.2),
        layers.GlobalAvgPool2D(),
        layers.Dense(128, activation="relu"),
        layers.Dense(10, activation="softmax"),
    ])
    return model
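A training sketch on MNIST (integer labels, so sparse_categorical_crossentropy):

model = get_training_model()
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
(x_tr, y_tr), _ = tf.keras.datasets.mnist.load_data()
model.fit(x_tr[..., None] / 255.0, y_tr, batch_size=128, epochs=1)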
Example #18
def FlipOutCNN(num_classes, **kwargs):
    Conv2DFlipOut = tfp.layers.Convolution2DFlipout

    model = tf.keras.Sequential([
        Conv2DFlipOut(64, 3, 2, 'same', activation=tf.nn.relu, **kwargs),
        Conv2DFlipOut(64, 3, 1, 'same', activation=tf.nn.relu, **kwargs),
        Conv2DFlipOut(128, 3, 2, 'same', activation=tf.nn.relu, **kwargs),
        Conv2DFlipOut(128, 3, 1, 'same', activation=tf.nn.relu, **kwargs),
        Conv2DFlipOut(num_classes, 3, 1, 'same', **kwargs),
        layers.GlobalAvgPool2D()
    ])
    return model
Example #19
def ReparamCNN(num_classes, **kwargs):
    Conv2DReparam = tfp.layers.Convolution2DReparameterization

    model = tf.keras.Sequential([
        Conv2DReparam(64, 3, 2, 'same', activation=tf.nn.relu, **kwargs),
        Conv2DReparam(64, 3, 1, 'same', activation=tf.nn.relu, **kwargs),
        Conv2DReparam(128, 3, 2, 'same', activation=tf.nn.relu, **kwargs),
        Conv2DReparam(128, 3, 1, 'same', activation=tf.nn.relu, **kwargs),
        Conv2DReparam(num_classes, 3, 1, 'same'),
        layers.GlobalAvgPool2D()
    ])
    return model
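For both Bayesian variants, the tfp layers register their KL terms on model.losses during the forward pass, so an ELBO-style objective adds them to the data likelihood; a minimal sketch:

model = ReparamCNN(num_classes=10)
_ = model(tf.zeros([1, 32, 32, 3]))   # building + one forward pass populates model.losses
kl_penalty = sum(model.losses)        # add this to the cross-entropy when training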
Example #20
    def __init__(
            self,
            num_classes,
            blocks,
            m_layers,
            channels,
            feature_dim=512,
            loss='softmax',
            **kwargs
    ):
        super(OSNetTF, self).__init__()
        num_blocks = len(blocks)
        assert num_blocks == len(m_layers)
        assert num_blocks == len(channels) - 1
        self.loss = loss
        self.channels = channels

        # convolutional backbone
        self.conv1 = ConvLayerTF(3, channels[0], 7, stride=2, padding="SAME")  # original padding=3
        # self.maxpool = nn.MaxPool2d(3, stride=2, padding="SAME")  # original padding=1
        self.maxpool = layers.MaxPool2D(3, strides=2, padding="SAME")  # original padding=1
        self.conv2 = self._make_layer(
            blocks[0],
            m_layers[0],
            channels[0],
            channels[1],
            reduce_spatial_size=True,
        )
        self.conv3 = self._make_layer(
            blocks[1],
            m_layers[1],
            channels[1],
            channels[2],
            reduce_spatial_size=True
        )
        self.conv4 = self._make_layer(
            blocks[2],
            m_layers[2],
            channels[2],
            channels[3],
            reduce_spatial_size=False
        )
        self.conv5 = Conv1x1TF(channels[3], channels[3])
        self.global_avgpool = layers.GlobalAvgPool2D()
        # fully connected layer
        self.fc = self._construct_fc_layer(
            feature_dim, channels[3], dropout_p=None
        )
        # identity classification layer
        # self.classifier = nn.Linear(self.feature_dim, num_classes)
        # self.classifier = layers.Dense(num_classes)
        self.classifier = layers.Dense(num_classes, kernel_initializer=tf.keras.initializers.RandomNormal(0, 0.01))
Example #21
 def __init__(self,
              res_unit,
              nunit_per_block,
              dilate_config=None,
              num_classes=1000,
              **kwargs):
     super(ResNet, self).__init__(**kwargs)
     self.dilation_rate = 1
     if dilate_config is None:
         dilate_config = [False, False, False, False]
      if len(dilate_config) != 4:
          raise ValueError(
              "`dilate_config` should be None or a 4-element tuple, got %d" %
              len(dilate_config))
      if dilate_config[0] is not False:
          raise ValueError("the 1st element of `dilate_config` has to be False")
     self.conv = layers.Conv2D(64,
                               7,
                               strides=2,
                               padding="same",
                               use_bias=False,
                               name="conv")
     self.bn = layers.BatchNormalization(epsilon=1.001e-5, name="bn")
     self.relu = layers.Activation("relu", name="relu")
     self.maxpool = layers.MaxPool2D(pool_size=3,
                                     strides=2,
                                     padding="same",
                                     name="maxpool")
     self.block0 = self._make_block(res_unit,
                                    64,
                                    nunit_per_block[0],
                                    dilate=dilate_config[0],
                                    name="block0")
     self.block1 = self._make_block(res_unit,
                                    128,
                                    nunit_per_block[1],
                                    stride=2,
                                    dilate=dilate_config[1],
                                    name="block1")
     self.block2 = self._make_block(res_unit,
                                    256,
                                    nunit_per_block[2],
                                    stride=2,
                                    dilate=dilate_config[2],
                                    name="block2")
     self.block3 = self._make_block(res_unit,
                                    512,
                                    nunit_per_block[3],
                                    stride=2,
                                    dilate=dilate_config[3],
                                    name="block3")
     self.avgpool = layers.GlobalAvgPool2D(name="global_avgpool")
     self.dense = layers.Dense(num_classes, name="dense")
Example #22
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 dilated=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNetA, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                if dilated:
                    strides = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1
                    dilation = (2 ** max(0, i - 1 - int(j == 0)))
                else:
                    strides = 2 if (j == 0) and (i != 0) else 1
                    dilation = 1
                stage.add(ResAUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=dilation,
                    dilation=dilation,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
Example #23
    def __init__(self,
                 block,
                 blocks_num,
                 num_classes=1000,
                 include_top=True,
                 **kwargs):
        super(ResNet, self).__init__(**kwargs)
        self.include_top = include_top
        self.conv1 = layers.Conv2D(filters=64,
                                   kernel_size=7,
                                   strides=2,
                                   padding="SAME",
                                   use_bias=False,
                                   name="conv1")
        self.bn1 = layers.BatchNormalization(momentum=0.9,
                                             epsilon=1.001e-5,
                                             name="conv1/BatchNorm")
        self.relu1 = layers.ReLU(name="relu1")
        self.maxpool1 = layers.MaxPool2D(pool_size=3,
                                         strides=2,
                                         padding="SAME",
                                         name="maxpool1")

        self.block1 = self._make_layer(block,
                                       True,
                                       64,
                                       blocks_num[0],
                                       name="block1")
        self.block2 = self._make_layer(block,
                                       False,
                                       128,
                                       blocks_num[1],
                                       strides=2,
                                       name="block2")
        self.block3 = self._make_layer(block,
                                       False,
                                       256,
                                       blocks_num[2],
                                       strides=2,
                                       name="block3")
        self.block4 = self._make_layer(block,
                                       False,
                                       512,
                                       blocks_num[3],
                                       strides=2,
                                       name="block4")

        if self.include_top:
            self.avgpool = layers.GlobalAvgPool2D(name="avgpool1")
            self.fc = layers.Dense(num_classes, name="logits")
            self.softmax = layers.Softmax()
Example #24
def SE_ResNet(height, width, num_class, res_block, blocks_list):
    """
    ResNet architecture: pass in different residual blocks and repeat counts to build ResNets of different depths.
    :param height: network input height
    :param width: network input width
    :param num_class: number of classes
    :param res_block: residual block unit
    :param blocks_list: list of repeat counts for each residual stage
    :return: the Keras model
    """
    input_image = layers.Input(shape=(height, width, 3))
    x = layers.Conv2D(filters=64,
                      kernel_size=7,
                      strides=2,
                      padding='SAME',
                      name='conv1',
                      use_bias=False)(input_image)
    x = layers.BatchNormalization(name="conv1/BatchNorm")(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding='SAME',
                         name="max_pool")(x)

    x = resblock_body(res_block, 64, blocks_list[0], strides=1,
                      name='conv2_x')(x)
    x = resblock_body(res_block,
                      128,
                      blocks_list[1],
                      strides=2,
                      name='conv3_x')(x)
    x = resblock_body(res_block,
                      256,
                      blocks_list[2],
                      strides=2,
                      name='conv4_x')(x)
    x = resblock_body(res_block,
                      512,
                      blocks_list[3],
                      strides=2,
                      name='conv5_x')(x)

    x = layers.GlobalAvgPool2D(name='avg_pool')(x)
    x = layers.Dense(num_class, name="logits")(x)
    outputs = layers.Softmax()(x)

    model = models.Model(inputs=input_image, outputs=outputs)
    model.summary()

    return model
Example #25
    def create_mobilevit(num_classes=5):
        inputs = keras.Input((image_size, image_size, 3))
        x = layers.Rescaling(scale=1.0 / 255)(inputs)

        # Initial conv-stem -> MV2 block.
        x = conv_block(x, filters=16)
        x = inverted_residual_block(x,
                                    expanded_channels=16 * expansion_factor,
                                    output_channels=16)

        # Downsampling with MV2 block.
        x = inverted_residual_block(x,
                                    expanded_channels=16 * expansion_factor,
                                    output_channels=24,
                                    strides=2)
        x = inverted_residual_block(x,
                                    expanded_channels=24 * expansion_factor,
                                    output_channels=24)
        x = inverted_residual_block(x,
                                    expanded_channels=24 * expansion_factor,
                                    output_channels=24)

        # First MV2 -> MobileViT block.
        x = inverted_residual_block(x,
                                    expanded_channels=24 * expansion_factor,
                                    output_channels=48,
                                    strides=2)
        x = mobilevit_block(x, num_blocks=2, projection_dim=64)

        # Second MV2 -> MobileViT block.
        x = inverted_residual_block(x,
                                    expanded_channels=64 * expansion_factor,
                                    output_channels=64,
                                    strides=2)
        x = mobilevit_block(x, num_blocks=4, projection_dim=80)

        # Third MV2 -> MobileViT block.
        x = inverted_residual_block(x,
                                    expanded_channels=80 * expansion_factor,
                                    output_channels=80,
                                    strides=2)
        x = mobilevit_block(x, num_blocks=3, projection_dim=96)
        x = conv_block(x, filters=320, kernel_size=1, strides=1)

        # Classification head.
        x = layers.GlobalAvgPool2D()(x)
        outputs = layers.Dense(num_classes, activation="softmax")(x)

        return keras.Model(inputs, outputs)
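create_mobilevit closes over image_size and expansion_factor (and the conv_block, inverted_residual_block, and mobilevit_block helpers), so those must exist in the enclosing scope; a hedged sketch with plausible values:

image_size = 256
expansion_factor = 2  # e.g. 2 for the smallest MobileViT variants, 4 for MobileViT-S
mobilevit_model = create_mobilevit(num_classes=5)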
Example #26
def _resnet(block,
            blocks_num,
            img_height=224,
            img_width=224,
            num_class=1000,
            include_top=True):
    input_image = layers.Input(shape=(img_height, img_width, 3),
                               dtype='float32')
    x = layers.Conv2D(filters=64,
                      kernel_size=7,
                      strides=2,
                      padding='same',
                      use_bias=False,
                      name='conv1')(input_image)
    x = layers.BatchNormalization(momentum=0.9,
                                  epsilon=1e-5,
                                  name='conv1/BatchNorm')(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)

    x = _make_layer(block, x.shape[-1], 64, blocks_num[0], name='block1')(x)
    x = _make_layer(block,
                    x.shape[-1],
                    128,
                    blocks_num[1],
                    strides=2,
                    name='block2')(x)
    x = _make_layer(block,
                    x.shape[-1],
                    256,
                    blocks_num[2],
                    strides=2,
                    name='block3')(x)
    x = _make_layer(block,
                    x.shape[-1],
                    512,
                    blocks_num[3],
                    strides=2,
                    name='block4')(x)

    if include_top:
        x = layers.GlobalAvgPool2D()(x)
        x = layers.Dense(num_class, name='logits')(x)
        predict = layers.Softmax()(x)
    else:
        predict = x

    model = Model(inputs=input_image, outputs=predict)
    return model
Example #27
def build_model():
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
    conv_base.trainable = False
    gap = layers.GlobalAvgPool2D()(conv_base.layers[-1].output)
    dropout_1 = layers.Dropout(0.25)(gap)
    dense1 = layers.Dense(512)(dropout_1)
    batchNorm = layers.BatchNormalization()(dense1)
    activation = layers.Activation(activation='relu')(batchNorm)
    dropout_2 = layers.Dropout(0.25)(activation)
    dense2 = layers.Dense(128, activation='relu')(dropout_2)
    dropout_3 = layers.Dropout(0.25)(dense2)
    y = layers.Dense(1, activation='sigmoid')(dropout_3)
    model = models.Model(conv_base.inputs, y)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    return model
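A usage sketch; the directory paths are hypothetical, and with the VGG16 base frozen only the new head trains:

model = build_model()
train_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    'data/train', target_size=(256, 256), class_mode='binary')
model.fit(train_gen, epochs=5)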
Example #28
def build_model():
    conv_base = VGG16(include_top=False, input_shape=(150, 150, 3))
    conv_base.trainable = False
    x = conv_base.input
    gap = layers.GlobalAvgPool2D()(conv_base.output)
    dense = layers.Dense(2)(gap)
    batch = layers.BatchNormalization()(dense)
    act = layers.Activation(activation='relu')(batch)
    do = layers.Dropout(0.25)(act)
    y = layers.Dense(1, activation='sigmoid')(do)

    model = models.Model(x, y)
    model.compile(optimizer=optimizers.RMSprop(learning_rate=0.0001, momentum=0.4), loss='binary_crossentropy', metrics=['accuracy'])

    return model
Example #29
    def __init__(self, Layer_dim, num_classes=100):
        super(ResNet, self).__init__()

        self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=1, padding='same'),
                                layers.BatchNormalization(),
                                layers.Activation('relu'),
                                layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
                                ])
        self.layer1 = self.build_resblock(64, Layer_dim[0])
        self.layer2 = self.build_resblock(128, Layer_dim[1], stride=2)
        self.layer3 = self.build_resblock(256, Layer_dim[2], stride=2)
        self.layer4 = self.build_resblock(512, Layer_dim[3], stride=2)

        self.avgpool = layers.GlobalAvgPool2D()
        self.fc = layers.Dense(num_classes)
Example #30
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ResNeStA, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = SimpleSequential(name="features")
        self.features.add(SEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = SimpleSequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(ResNeStAUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.GlobalAvgPool2D(
            data_format=data_format,
            name="final_pool"))

        self.output1 = SimpleSequential(name="output1")
        if dropout_rate > 0.0:
            self.output1.add(nn.Dropout(
                rate=dropout_rate,
                name="output1/dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1/fc"))