Code Example #1
    def creat_model(self):
        # Experiment with a ResNet-like architecture
            
        input_layer   = Input(shape = [400, 400, self.channels, ], name = "Input_layer")
        # The first stage mostly uses larger strides to downsample
        block_0_con2d_layer   = layers.Conv2D(self.start_nodes, 3, 2, name = "block_0_con2d_layer")(input_layer)
        block_0_bn_layer      = layers.BatchNormalization(name = "block_0_bn_layer")(block_0_con2d_layer)
        block_0_act_layer     = layers.PReLU(name = "block_0_act_layer")(block_0_bn_layer)
        block_0_maxpool_layer = layers.MaxPool2D(name = "block_0_maxpool_layer")(block_0_act_layer)
        # The next stage is built from several blocks;
        # each block usually starts by reducing the resolution and increasing the channel count
        block_1_reduce_image_size = self.reduce_image_size(block_0_maxpool_layer, 1)
        block_1_main_con2d_net    = self.con2d_net_block(block_1_reduce_image_size, 1, 2)

        block_2_reduce_image_size = self.reduce_image_size(block_1_main_con2d_net, 2)
        block_2_main_con2d_net    = self.con2d_net_block(block_2_reduce_image_size, 2, 2)

        block_3_reduce_image_size = self.reduce_image_size(block_2_main_con2d_net, 3)
        block_3_main_con2d_net    = self.con2d_net_block(block_3_reduce_image_size, 3, 3)

        block_4_reduce_image_size = self.reduce_image_size(block_3_main_con2d_net, 4)
        block_4_main_con2d_net    = self.con2d_net_block(block_4_reduce_image_size, 4, 5)

        block_5_reduce_image_size = self.reduce_image_size(block_4_main_con2d_net, 5)
        block_5_main_con2d_net    = self.con2d_net_block(block_5_reduce_image_size, 5, 2)

        # Finally reduce with an average pool and connect to the output
        avg_pool_layer = layers.AvgPool2D(4, 1)(block_5_main_con2d_net)
        flatten_layer  = layers.Flatten()(avg_pool_layer)
        output_layer   = layers.Dense(self.number_of_class, activation = 'softmax')(flatten_layer)

        self.model = Model(input_layer, output_layer)
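
The helpers reduce_image_size and con2d_net_block are not shown in this snippet. Below is a minimal sketch of what they might look like, assuming the pattern the comments describe (downsample and widen at the start of each block, then a stack of residual units); the filter growth and layer names are guesses, and layers is the same Keras alias used above.

    def reduce_image_size(self, x, block_id):
        # Hypothetical helper: halve the resolution and grow the channel count
        # at the start of each block.
        filters = self.start_nodes * (2 ** block_id)
        x = layers.Conv2D(filters, 3, strides=2, padding="same",
                          name="block_{}_reduce_conv".format(block_id))(x)
        x = layers.BatchNormalization(name="block_{}_reduce_bn".format(block_id))(x)
        return layers.PReLU(name="block_{}_reduce_act".format(block_id))(x)

    def con2d_net_block(self, x, block_id, repeats):
        # Hypothetical helper: a stack of residual units, each with two 3x3 convolutions.
        filters = x.shape[-1]
        for i in range(repeats):
            shortcut = x
            y = layers.Conv2D(filters, 3, padding="same",
                              name="block_{}_{}_conv_a".format(block_id, i))(x)
            y = layers.BatchNormalization(name="block_{}_{}_bn_a".format(block_id, i))(y)
            y = layers.PReLU(name="block_{}_{}_act_a".format(block_id, i))(y)
            y = layers.Conv2D(filters, 3, padding="same",
                              name="block_{}_{}_conv_b".format(block_id, i))(y)
            y = layers.BatchNormalization(name="block_{}_{}_bn_b".format(block_id, i))(y)
            x = layers.PReLU(name="block_{}_{}_act_b".format(block_id, i))(
                layers.Add(name="block_{}_{}_add".format(block_id, i))([shortcut, y]))
        return x
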
Code Example #2
def inception_v1(inputs):
    """Inception
    1.discard dense layer that has too many params
    2.dense layer can overfit
    3.network in network: Use conv_1X1 to reduce params and use conv_3X3, conv_5X5 to make
      [receptive field rich]. Make feature rich.

    """
    x = layers.Conv2D(64, 7, 2, padding='same', activation='relu')(inputs)
    x = layers.MaxPool2D(3, 2, padding='same')(x)
    x = layers.Conv2D(64, 1, padding='same', activation='relu')(x)
    x = layers.Conv2D(192, 3, padding='same', activation='relu')(x)
    x = layers.MaxPool2D(3, 2, padding='same')(x)

    x1 = inception_block(x, 64, 96, 128, 16, 32, 32)
    x2 = inception_block(x1, 128, 128, 192, 32, 96, 64)
    x3 = layers.MaxPool2D(3, strides=2, padding='same')(x2)
    x4 = inception_block(x3, 192, 96, 208, 16, 48, 64)
    x5 = inception_block(x4, 160, 112, 224, 24, 64, 64)
    x6 = inception_block(x5, 128, 128, 256, 24, 64, 64)
    x7 = inception_block(x6, 112, 144, 288, 32, 64, 64)
    x8 = inception_block(x7, 256, 160, 320, 32, 128, 128)

    x9 = layers.MaxPool2D(3, 2, padding='same')(x8)

    x10 = inception_block(x9, 256, 160, 320, 32, 128, 128)
    x11 = inception_block(x10, 384, 192, 384, 48, 128, 128)

    x12 = layers.AvgPool2D(7, strides=1)(x11)
    x12 = layers.Flatten()(x12)  # flatten the pooled 1x1x1024 features before the classifier
    x13 = layers.Dropout(rate=0.4)(x12)
    x14 = layers.Dense(1000, activation='softmax')(x13)

    return x14
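
The inception_block helper is not defined in this snippet. A sketch consistent with the six filter arguments used above (1x1, 3x3-reduce, 3x3, 5x5-reduce, 5x5 and pool-projection branches, as in GoogLeNet) might look like this; the argument names are assumptions.

from tensorflow.keras import layers

def inception_block(x, f1x1, f3x3_red, f3x3, f5x5_red, f5x5, f_pool):
    # Hypothetical helper matching the call sites above: four parallel branches
    # concatenated along the channel axis.
    b1 = layers.Conv2D(f1x1, 1, padding='same', activation='relu')(x)

    b2 = layers.Conv2D(f3x3_red, 1, padding='same', activation='relu')(x)
    b2 = layers.Conv2D(f3x3, 3, padding='same', activation='relu')(b2)

    b3 = layers.Conv2D(f5x5_red, 1, padding='same', activation='relu')(x)
    b3 = layers.Conv2D(f5x5, 5, padding='same', activation='relu')(b3)

    b4 = layers.MaxPool2D(3, strides=1, padding='same')(x)
    b4 = layers.Conv2D(f_pool, 1, padding='same', activation='relu')(b4)

    return layers.Concatenate()([b1, b2, b3, b4])
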
Code Example #3
    def call(self, x):
        # Forward pass.
        # TC-ResNet trick: reshape the (length, channels) input into a 4D tensor with a
        # tiny "height" so the 2D convolutions act mostly along the time axis.
        L = x.shape[1]
        C = x.shape[2]
        # inputs = tf.reshape(x, [-1, L, 1, C])  # [N, L, 1, C] (original TC-ResNet shape)
        inputs = tf.reshape(x, [-1, L, 3, C])  # [N, L, 3, C]

        res0_1 = self.conv1(inputs)
        # through block1
        res1_1 = self.bn1_2(self.conv3(self.activ1_1(self.bn1_1(self.conv2(res0_1)))))
        res1_1_1 = self.activ1_1_1(self.bn1_1_1(self.conv4(res0_1)))
        res1_2 = l.concatenate([res1_1, res1_1_1])
        res1_3 = self.activ1_2(res1_2)
        # through block 2
        res2_1 = self.bn2_2(self.conv6(self.activ2_1(self.bn2_1(self.conv5(res1_3)))))
        res2_1_1 = self.activ2_1_1(self.bn2_1_1(self.conv7(res1_3)))
        res2_2 = l.concatenate([res2_1, res2_1_1])
        res2_3 = self.activ2_2(res2_2)
        # through block 3
        res3_1 = self.bn3_2(self.conv9(self.activ3_1(self.bn3_1(self.conv8(res2_3)))))
        res3_1_1 = self.activ3_1_1(self.bn3_1_1(self.conv10(res2_3)))
        res3_2 = l.concatenate([res3_1, res3_1_1])
        res3_3 = self.activ3_2(res3_2)
        self.avg_pool = l.AvgPool2D(pool_size=res3_3.shape[1:3], strides=1)  # Average Pooling
        res = self.avg_pool(res3_3)
        return self.out_block(res)
Code Example #4
File: model.py  Project: niuxinzan/tf20_dl
 def __init__(self, num_classes, **kwargs):
     super(InceptionAux, self).__init__(**kwargs)
     self.averagePool = layers.AvgPool2D(pool_size=5, strides=3)  # average pooling
     self.conv = layers.Conv2D(128, kernel_size=1, activation='relu')
     self.fc1 = layers.Dense(1024, activation='relu')
     self.fc2 = layers.Dense(num_classes)
     self.softmax = layers.Softmax()
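
Only the constructor is shown here; a plausible call method wiring these layers in the usual auxiliary-classifier order (pool, 1x1 conv, flatten, two dense layers, softmax), assuming tensorflow is imported as tf, could be:

    def call(self, inputs):
        # Hypothetical forward pass for the layers defined above.
        x = self.averagePool(inputs)             # e.g. 14x14x512 -> 4x4x512
        x = self.conv(x)                         # 1x1 conv -> 4x4x128
        x = tf.reshape(x, [tf.shape(x)[0], -1])  # flatten
        x = self.fc1(x)
        x = self.fc2(x)
        return self.softmax(x)
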
Code Example #5
    def __init__(self):
        self.cnn1 = layers.Conv2D(
            filters=6,
            kernel_size=5,
            activation='sigmoid',
        )
        self.avgpool1 = layers.AvgPool2D(pool_size=2, strides=2)

        self.cnn2 = layers.Conv2D(filters=16,
                                  kernel_size=5,
                                  activation='sigmoid')
        self.avgpool2 = layers.AvgPool2D(pool_size=2, strides=2)
        self.flat = layers.Flatten()
        self.dense1 = layers.Dense(120, activation='sigmoid')
        self.dense2 = layers.Dense(84, activation='sigmoid')
        self.dense3 = layers.Dense(10, activation='sigmoid')
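
Only the layers are defined above; a plausible call method chaining them in LeNet-5 order would be:

    def call(self, inputs):
        # Hypothetical forward pass for the layers defined above.
        x = self.avgpool1(self.cnn1(inputs))
        x = self.avgpool2(self.cnn2(x))
        x = self.flat(x)
        x = self.dense1(x)
        x = self.dense2(x)
        return self.dense3(x)
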
Code Example #6
File: model.py  Project: zhaolin123/first
def ResNet_20():
    inputs = tf.keras.Input(shape=(256, 256, 3), name="img")
    x = inputs

    x = layers.Conv2D(64, 7)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D()(x)
    for _ in range(3):
        x = ResBlock(num_feature_in=64, num_feature_out=64)(x)
    x = ResBlock(num_feature_in=64, num_feature_out=128, strides=(2, 2))(x)
    for _ in range(2):
        x = ResBlock(num_feature_in=128, num_feature_out=128)(x)
    x = ResBlock(num_feature_in=128, num_feature_out=256, strides=(2, 2))(x)
    for _ in range(2):
        x = ResBlock(num_feature_in=256, num_feature_out=256)(x)
    x = ResBlock(num_feature_in=256, num_feature_out=512, strides=(2, 2))(x)
    for _ in range(2):
        x = ResBlock(num_feature_in=512, num_feature_out=512)(x)
    x = layers.AvgPool2D()(x)
    x = layers.Flatten()(x)
    outputs = layers.Dense(10, activation="softmax")(x)

    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="ResNet_20")
    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss="categorical_crossentropy",
                  metrics=["acc"])
    return model
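
ResBlock is defined elsewhere in that project. A minimal sketch consistent with its call signature (two 3x3 convolutions plus a projection shortcut when the shape changes) could be:

import tensorflow as tf
from tensorflow.keras import layers

class ResBlock(tf.keras.layers.Layer):
    # Hypothetical basic residual block matching the calls above.
    def __init__(self, num_feature_in, num_feature_out, strides=(1, 1), **kwargs):
        super().__init__(**kwargs)
        self.conv1 = layers.Conv2D(num_feature_out, 3, strides=strides, padding='same')
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.Conv2D(num_feature_out, 3, padding='same')
        self.bn2 = layers.BatchNormalization()
        self.relu = layers.ReLU()
        if strides != (1, 1) or num_feature_in != num_feature_out:
            # Projection shortcut when the spatial size or channel count changes.
            self.shortcut = layers.Conv2D(num_feature_out, 1, strides=strides)
        else:
            self.shortcut = None

    def call(self, x):
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        s = self.shortcut(x) if self.shortcut is not None else x
        return self.relu(y + s)
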
Code Example #7
def GoogleNet(im_height=224, im_width=224, class_num=1000, aux_logits=False):
    # TensorFlow tensors use NHWC channel ordering (batch, height, width, channels)
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")
    # (None, 224,224, 3)
    x = layers.Conv2D(64,
                      kernel_size=7,
                      strides=2,
                      padding="SAME",
                      activation="relu",
                      name="conv2d_1")(input_image)
    # (None, 112,112,64)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_1")(x)
    # (None, 56, 56, 64)
    x = layers.Conv2D(64, kernel_size=1, activation="relu", name="conv2d_2")(x)
    # (None, 56, 56, 64)
    x = layers.Conv2D(192, kernel_size=3, padding="SAME", activation="relu", name="conv2d_3")(x)
    # (None, 56, 56, 192)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_2")(x)
    # (None, 28, 28, 192)
    x = Inception(64, 96, 128, 16, 32, 32, name="inception_3a")(x)
    # (None, 28, 28,256)
    x = Inception(128, 128, 192, 32, 96, 64, name="inception_3b")(x)
    # (None, 28, 28, 480)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_3")(x)
    # (None, 14,14,480)
    x = Inception(192, 96, 208, 16, 48, 64, name="inception_4a")(x)
    if aux_logits:
        aux1 = InceptionAux(class_num, name="aux_1")(x)
    # (None, 14,14,512)

    x = Inception(160, 112, 224, 24, 64, 64, name="inception_4b")(x)
    # (None, 14,14,512)
    x = Inception(128, 128, 256, 24, 64, 64, name="inception_4c")(x)
    # (None, 14,14,512)
    x = Inception(112, 144, 288, 32, 64, 64, name="inception_4d")(x)
    if aux_logits:
        aux2 = InceptionAux(class_num, name="aux_2")(x)
    # (None, 14, 14, 528)
    x = Inception(256, 160, 320, 32, 128, 128, name="inception_4e")(x)
    # (None, 14, 14, 832)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_4")(x)
    # (None, 7, 7, 832)
    x = Inception(256, 160, 320, 32, 128, 128, name="inception_5a")(x)
    # (None, 7, 7, 832)
    x = Inception(384, 192, 384, 48, 128, 128, name="inception_5b")(x)
    # (None,7,7,1024)
    x = layers.AvgPool2D(pool_size=7, strides=1, name="avgpool_1")(x)
    # (None, 1,1,1024)
    x = layers.Flatten(name="output_flatten")(x)
    # (None,1024)
    x = layers.Dropout(rate=0.4, name="output_dropout")(x)
    x = layers.Dense(class_num, name='output_dense')(x)
    # (None, class_num)
    aux3 = layers.Softmax(name="aux_3")(x)  # aux3 is the main classifier
    if aux_logits:
        model = models.Model(inputs=input_image, outputs=[aux1, aux2, aux3])
    else:
        model = models.Model(inputs=input_image, outputs=aux3)
    return model
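
Inception and InceptionAux are project classes that are not shown in this snippet (Code Examples #4 and #8 show two InceptionAux constructors). A sketch of an Inception layer consistent with its six filter arguments might be:

import tensorflow as tf
from tensorflow.keras import layers

class Inception(layers.Layer):
    # Hypothetical Inception block matching the six-argument call sites above.
    def __init__(self, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj, **kwargs):
        super().__init__(**kwargs)
        self.branch1 = layers.Conv2D(ch1x1, 1, activation="relu")
        self.branch2 = tf.keras.Sequential([
            layers.Conv2D(ch3x3red, 1, activation="relu"),
            layers.Conv2D(ch3x3, 3, padding="SAME", activation="relu")])
        self.branch3 = tf.keras.Sequential([
            layers.Conv2D(ch5x5red, 1, activation="relu"),
            layers.Conv2D(ch5x5, 5, padding="SAME", activation="relu")])
        self.branch4 = tf.keras.Sequential([
            layers.MaxPool2D(pool_size=3, strides=1, padding="SAME"),
            layers.Conv2D(pool_proj, 1, activation="relu")])

    def call(self, inputs):
        return layers.concatenate([self.branch1(inputs), self.branch2(inputs),
                                   self.branch3(inputs), self.branch4(inputs)])
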
Code Example #8
    def __init__(self, num_classes, **kwargs):
        super(InceptionAux, self).__init__(**kwargs)
        self.averagePool = layers.AvgPool2D(pool_size=5, strides=3)
        self.conv = layers.Conv2D(128, kernel_size=1, use_bias=False, name="conv/conv")
        self.bn1 = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name="conv/bn")
        self.rule1 = layers.ReLU()

        self.fc1 = layers.Dense(1024, activation="relu", name="fc1")
        self.fc2 = layers.Dense(num_classes, name="fc2")
        self.softmax = layers.Softmax()
Code Example #9
File: subsample.py  Project: xlnwel/d2rl
    def build(self, input_shape):
        filters = self._filters or input_shape[-1]
        conv_name = f'{self.scope_name}/conv'
        norm_cls = get_norm(self._norm)
        norm_name = f'{self.scope_name}/{self._norm}'
        act_name = f'{self.scope_name}/{self._activation}'
        act_cls = get_activation(self._activation, return_cls=True)
        pool_name = f'{self.scope_name}/{self._pool_type}'

        self._layers = []
        if self._conv is not None:
            self._layers += [
                self._conv(filters,
                           self._filter_size,
                           strides=1,
                           padding='same',
                           name=conv_name,
                           **self._kwargs),
                norm_cls(**self._norm_kwargs, name=norm_name),
                act_cls(name=act_name, **self._act_kwargs),
            ]
        if self._pool_type == 'max':
            self._layers += [
                layers.MaxPool2D(self._filter_size,
                                 strides=self._strides,
                                 padding='same',
                                 name=pool_name)
            ]
        elif self._pool_type == 'avg':
            self._layers += [
                layers.AvgPool2D(self._filter_size,
                                 strides=self._strides,
                                 padding='same',
                                 name=pool_name)
            ]
        elif self._pool_type == 'maxblur':
            self._layers += [
                layers.MaxPool2D(self._filter_size,
                                 strides=1,
                                 padding='same',
                                 name=f'{self.scope_name}/max'),
                blurpool(self._filter_size,
                         strides=self._strides,
                         pad_mode=self._pad_mode,
                         name=f'{self.scope_name}/blur')
            ]
        elif self._pool_type == 'blur':
            self._layers += [
                blurpool(self._filter_size,
                         strides=self._strides,
                         pad_mode=self._pad_mode,
                         name=pool_name)
            ]
        else:
            raise ValueError(f'Unknown pool type: {self._pool_type}')
Code Example #10
 def __init__(self, numclass=10, expansion=4):
     super(Resnet, self).__init__()
     self.expansion = expansion
     self.conv1 = Conv1(64)
     self.layer1 = self.make_layer(channel=64, block=3, stride=1)
     self.layer2 = self.make_layer(128, 8, stride=2)
     self.layer3 = self.make_layer(256, 36, stride=2)
     self.layer4 = self.make_layer(512, 3, stride=2)
     self.avgpool = layers.AvgPool2D(7, strides=1)
     self.fc = layers.Dense(numclass)
     self.fal = layers.Flatten()
Code Example #11
def Squeeze(_in, out_channels, se_ratio=12):
    # Squeeze: global average pooling over the spatial dimensions, reshaped to a
    # 1x1 map so it broadcasts against the input in the multiply below
    avg_pool = layers.GlobalAvgPool2D()(_in)
    avg_pool = layers.Reshape((1, 1, _in.shape[-1]))(avg_pool)
    
    # Next, fully connected sequential layers
    x = layers.Conv2D(out_channels // se_ratio, kernel_size=1, padding='valid')(avg_pool)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(out_channels, kernel_size=1, padding='valid', activation='sigmoid')(x)

    return layers.multiply([_in, x])
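
A hypothetical usage of this SE-style gate inside a functional model (the sizes below are only illustrative; the gate's out_channels must match the channel count of the tensor it multiplies):

import tensorflow as tf
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(32, 32, 64))
features = layers.Conv2D(64, 3, padding='same', activation='relu')(inputs)
gated = Squeeze(features, out_channels=64, se_ratio=8)  # channel-wise re-weighting
model = tf.keras.Model(inputs, gated)
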
Code Example #12
def inception_module_A(x,
                       filters_b1,
                       filters_b2_1,
                       filters_b2_2,
                       filters_b3_1,
                       filters_b3_2,
                       filters_b3_3,
                       filters_b4,
                       name="Inception"):

    branch1 = layers.Conv2D(filters_b1,
                            1,
                            activation="relu",
                            name=name + "_branch_1_1x1")(x)

    branch2 = layers.Conv2D(filters_b2_1,
                            1,
                            activation="relu",
                            name=name + "_branch_2_1x1")(x)
    branch2 = layers.Conv2D(filters_b2_2,
                            3,
                            padding="same",
                            activation="relu",
                            name=name + "_branch_2_3x3")(branch2)

    branch3 = layers.Conv2D(filters_b3_1,
                            1,
                            activation="relu",
                            name=name + "_branch_3_1x1")(x)
    branch3 = layers.Conv2D(filters_b3_2,
                            3,
                            padding="same",
                            activation="relu",
                            name=name + "_branch_3_3x3_1")(branch3)
    branch3 = layers.Conv2D(filters_b3_3,
                            3,
                            padding="same",
                            activation="relu",
                            name=name + "_branch_3_3x3_2")(branch3)

    branch4 = layers.AvgPool2D(pool_size=3,
                               strides=1,
                               padding="same",
                               name=name + "_branch_4_pool")(x)
    branch4 = layers.Conv2D(filters_b4,
                            1,
                            activation="relu",
                            name=name + "_branch_4_1x1")(branch4)

    x = layers.Concatenate(name=name +
                           "_Mixed")([branch1, branch2, branch3, branch4])

    return x
Code Example #13
def LeNet(num_classes, input_shape=(32, 32, 3)):
    model = models.Sequential()

    model.add(
        layers.Conv2D(6,
                      input_shape=input_shape,
                      kernel_size=5,
                      activation='tanh'))
    model.add(layers.AvgPool2D(pool_size=(2, 2), strides=2))

    model.add(layers.Conv2D(16, activation='tanh', kernel_size=5))
    model.add(layers.AvgPool2D(pool_size=(2, 2), strides=2))

    model.add(layers.Flatten())
    model.add(layers.Dense(
        120,
        activation='tanh',
    ))
    model.add(layers.Dense(84, activation='tanh'))

    model.add(layers.Dense(num_classes, activation='softmax'))

    return model
Code Example #14
 def __init__(self):
   super().__init__()
   self.model = tf.keras.Sequential()
   self.model.add(make_conv(32, 3))
   self.model.add(Layers.MaxPool2D(padding='same'))
   self.model.add(InceptionBlock(20, 8, 20, 6, 12, 12))
   self.model.add(InceptionBlock(20, 8, 20, 6, 12, 12))
   self.model.add(Layers.MaxPool2D(padding='same'))
   self.model.add(InceptionBlock(36, 12, 36, 10, 28, 28))
   self.model.add(InceptionBlock(36, 12, 36, 10, 28, 28))
   self.model.add(InceptionBlock(36, 12, 36, 10, 28, 28))
   self.model.add(Layers.AvgPool2D(padding='same'))
   self.model.add(Layers.Flatten())
   self.model.add(Layers.Dropout(0.5))
   self.model.add(Layers.Dense(10, activation='softmax'))
Code Example #15
File: model.py  Project: yusufdalva/CS559_Homework
 def pool_layer(self, layer_metadata):
     if layer_metadata["pool_type"] == "avg":
         return layers.AvgPool2D(pool_size=layer_metadata["pool_size"],
                                 strides=layer_metadata["strides"],
                                 padding=layer_metadata["padding"],
                                 data_format=self.data_format)
     elif layer_metadata["pool_type"] == "max":
         return layers.MaxPool2D(pool_size=layer_metadata["pool_size"],
                                 strides=layer_metadata["strides"],
                                 padding=layer_metadata["padding"],
                                 data_format=self.data_format)
     else:
         raise ValueError(
             "Invalid pooling layer type for {}, should be one of (avg, max)"
             .format(layer_metadata))
Code Example #16
File: oct_conv2d.py  Project: pykeras/OctConv-Keras
 def call(self, inputs):
     # Input = [X^H, X^L]
     high_input, low_input = inputs
     assert len(inputs) == 2
     # High -> High conv
     high_to_high = layers.Conv2DTranspose(
         self.high_channels,
         self.kernel_size,
         strides=self.strides,
         padding=self.padding,
         data_format="channels_last",
         kernel_regularizer=self.kernel_regularizer)(high_input)
     # High -> Low conv
     high_to_low = layers.AvgPool2D((2, 2), strides=(2, 2))(high_input)
     high_to_low = layers.Conv2DTranspose(
         self.low_channels,
         self.kernel_size,
         strides=self.strides,
         padding=self.padding,
         data_format="channels_last",
         kernel_regularizer=self.kernel_regularizer)(high_to_low)
     # Low -> High conv
     low_to_high = layers.Conv2DTranspose(
         self.high_channels,
         self.kernel_size,
         strides=self.strides,
         padding=self.padding,
         data_format="channels_last",
         kernel_regularizer=self.kernel_regularizer)(low_input)
     low_to_high = layers.UpSampling2D(
         (2, 2), data_format='channels_last', interpolation='nearest')(
             low_to_high)  # Nearest Neighbor Upsampling
     # low_to_high = K.repeat_elements(low_to_high, 2, axis=2)
     # Low -> Low conv
     low_to_low = layers.Conv2DTranspose(
         self.low_channels,
         self.kernel_size,
         strides=self.strides,
         padding=self.padding,
         data_format="channels_last",
         kernel_regularizer=self.kernel_regularizer)(low_input)
     # Cross Add
     high_add = high_to_high + low_to_high
     low_add = high_to_low + low_to_low
     return [high_add, low_add]
Code Example #17
def _inception_output(input, num_classes):

    x = layers.AvgPool2D(pool_size=(5, 5), strides=1, padding='valid')(input)
    x = layers.Conv2D(128,
                      activation='relu',
                      kernel_size=1,
                      strides=1,
                      padding='same')(x)
    x = tf.keras.layers.Flatten()(x)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dropout(0.7)(x)
    out = layers.Dense(num_classes, activation='softmax')(x)

    return out
Code Example #18
File: networks.py  Project: lilujunai/lepetite
 def _make_layers(self):
   m = tf.keras.Sequential()
   for s in self.child_states:
     if s[0]==2:
       m.add(layers.MaxPool2D(pool_size=s[1], 
                              strides=s[2], 
                              padding='same'))
     elif s[0]==1:
       m.add(layers.Conv2D(filters=s[3], 
                           kernel_size=s[1], 
                           strides=s[2],
                           padding='same'))
       m.add(layers.ReLU())
     else:
       pass
   m.add(layers.AvgPool2D(pool_size=1, strides=1, padding='same'))
   m.add(layers.Flatten())
   return m
Code Example #19
File: model.py  Project: huangjunxiong11/TF2
def GoogLeNet(im_height=224, im_width=224, class_num=1000, aux_logits=False):
    # Input: a 224x224, 3-channel color image
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")
    x = layers.Conv2D(64, kernel_size=7, strides=2, padding="SAME", activation="relu", name="conv2d_1")(input_image)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_1")(x)
    x = layers.Conv2D(64, kernel_size=1, activation="relu", name="conv2d_2")(x)
    x = layers.Conv2D(192, kernel_size=3, padding="SAME", activation="relu", name="conv2d_3")(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_2")(x)
    # Inception modules
    x = Inception(64, 96, 128, 16, 32, 32, name="inception_3a")(x)
    x = Inception(128, 128, 192, 32, 96, 64, name="inception_3b")(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_3")(x)
    # Inception module
    x = Inception(192, 96, 208, 16, 48, 64, name="inception_4a")(x)
    # Optionally attach auxiliary classifier 1 (used during training, removed at inference)
    if aux_logits:
        aux1 = InceptionAux(class_num, name="aux_1")(x)
    # Inception modules
    x = Inception(160, 112, 224, 24, 64, 64, name="inception_4b")(x)
    x = Inception(128, 128, 256, 24, 64, 64, name="inception_4c")(x)
    x = Inception(112, 144, 288, 32, 64, 64, name="inception_4d")(x)
    # Optionally attach auxiliary classifier 2 (used during training, removed at inference)
    if aux_logits:
        aux2 = InceptionAux(class_num, name="aux_2")(x)
    # Inception module
    x = Inception(256, 160, 320, 32, 128, 128, name="inception_4e")(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_4")(x)
    # Inception modules
    x = Inception(256, 160, 320, 32, 128, 128, name="inception_5a")(x)
    x = Inception(384, 192, 384, 48, 128, 128, name="inception_5b")(x)
    # Average pooling layer
    x = layers.AvgPool2D(pool_size=7, strides=1, name="avgpool_1")(x)
    # Flatten
    x = layers.Flatten(name="output_flatten")(x)
    x = layers.Dropout(rate=0.4, name="output_dropout")(x)
    x = layers.Dense(class_num, name="output_dense")(x)
    aux3 = layers.Softmax(name="aux_3")(x)
    # Build the model with or without the auxiliary classifiers
    if aux_logits:
        model = models.Model(inputs=input_image, outputs=[aux1, aux2, aux3])
    else:
        model = models.Model(inputs=input_image, outputs=aux3)
    return model
Code Example #20
    def __init__(self,
                 out_features,
                 norm=False,
                 kernel_size=4,
                 pool=False,
                 sn=False):
        super(DownBlock2d, self).__init__()
        self.conv = layers.Conv2D(out_features,
                                  kernel_size=kernel_size,
                                  padding='SAME')

        if sn:
            self.conv = addons_layers.SpectralNormalization(self.conv)

        if norm:
            self.norm = addons_layers.InstanceNormalization()
        else:
            self.norm = None
        self.pool = pool
        if self.pool:
            self.avg_pool = layers.AvgPool2D(pool_size=(2, 2))
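
The forward pass is not shown; a plausible call method consistent with these attributes (conv, optional instance norm, ReLU, optional average pooling), assuming tensorflow is imported as tf, would be:

    def call(self, x):
        # Hypothetical forward pass for the attributes defined above.
        out = self.conv(x)
        if self.norm is not None:
            out = self.norm(out)
        out = tf.nn.relu(out)
        if self.pool:
            out = self.avg_pool(out)
        return out
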
Code Example #21
def AOI_model():
    inputs_image = tf.keras.Input(shape=(512, 512, 1), name='input_image')
    x = inputs_image

    x = layers.Conv2D(32, 3, strides=(2,2))(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D()(x)

    x = layers.Conv2D(64, 3, strides=(2,2))(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D()(x)

    for _ in range(2):
        x = ResBlockBottleneck(64, 64, 256)(x)
    x = ResBlockBottleneck(64, 64, 256, strides=(2,2))(x)

    for _ in range(3):
        x = ResBlockBottleneck(128, 128, 512)(x)
    x = ResBlockBottleneck(128, 128, 512, strides=(2,2))(x)

    for _ in range(5):
        x = ResBlockBottleneck(256, 256, 1024)(x)
    x = ResBlockBottleneck(256, 256, 1024, strides=(2,2))(x)

    for _ in range(2):
        x = ResBlockBottleneck(512, 512, 2048)(x)
    x = ResBlockBottleneck(512, 512, 2048, strides=(2,2))(x)

    x = layers.AvgPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(100, activation='relu')(x)
    x = layers.Dense(6, activation='softmax')(x)

    model = tf.keras.Model(inputs=inputs_image, outputs=x)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['acc'])

    return model
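
ResBlockBottleneck is defined elsewhere in that project. A sketch consistent with its three filter arguments (1x1 reduce, 3x3, 1x1 expand) and optional strides, using an always-projected shortcut for simplicity, could be:

import tensorflow as tf
from tensorflow.keras import layers

class ResBlockBottleneck(tf.keras.layers.Layer):
    # Hypothetical bottleneck residual block matching the calls above.
    def __init__(self, f1, f2, f3, strides=(1, 1), **kwargs):
        super().__init__(**kwargs)
        self.body = tf.keras.Sequential([
            layers.Conv2D(f1, 1, strides=strides), layers.BatchNormalization(), layers.ReLU(),
            layers.Conv2D(f2, 3, padding='same'), layers.BatchNormalization(), layers.ReLU(),
            layers.Conv2D(f3, 1), layers.BatchNormalization()])
        # Projection shortcut so the residual addition always matches in shape.
        self.project = tf.keras.Sequential([
            layers.Conv2D(f3, 1, strides=strides), layers.BatchNormalization()])
        self.relu = layers.ReLU()

    def call(self, x):
        return self.relu(self.body(x) + self.project(x))
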
Code Example #22
    def _make_layer(
            self,
            block,
            layer,
            in_channels,
            out_channels,
            reduce_spatial_size,
    ):
        m_layers = [block(in_channels, out_channels)]

        for i in range(1, layer):
            m_layers.append(block(out_channels, out_channels))

        if reduce_spatial_size:
            m_layers.append(
                tf.keras.Sequential([
                    Conv1x1TF(out_channels, out_channels),
                    layers.AvgPool2D(2, strides=2)
                ])
            )

        return tf.keras.Sequential(m_layers)
Code Example #23
def upsample_conv(filters, kernel_size, strides, padding):
    return layers.Conv2DTranspose(filters, kernel_size,
                                  strides=strides,
                                  padding=padding)


def upsample_simple(filters, kernel_size, strides, padding):
    return layers.UpSampling2D(strides)


if UPSAMPLE_MODE == 'DECONV':
    upsample = upsample_conv
else:
    upsample = upsample_simple

input_img = layers.Input(t_x.shape[1:], name='RGB_Input')
pp_in_layer = input_img
if NET_SCALING is not None:
    pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer)

pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer)
pp_in_layer = layers.BatchNormalization()(pp_in_layer)

c1 = layers.Conv2D(8, (3, 3),
                   kernel_initializer='he_uniform',
                   bias_initializer='zeros',
                   activation='relu',
                   padding='same')(pp_in_layer)
c1 = layers.Conv2D(8, (3, 3),
                   kernel_initializer='he_uniform',
                   bias_initializer='zeros',
                   activation='relu',
                   padding='same')(c1)
p1 = layers.MaxPooling2D((2, 2))(c1)
Code Example #24
def inception(num_classes, input_shape, output_layes, maxpool_layers, depth=1):
    # if is isinstance(output_layes, (list,set,tuple)):
    #     output_layes = set(output_layes)
    # else:
    #     raise("Enter a valid output_layers")

    #if depth < len(maxpool_layers) or depth< len(output_layes):
    #    raise("Enter a valid depth")

    inputs = tf.keras.Input(shape=input_shape, name='img')

    x = layers.Conv2D(64,
                      activation='relu',
                      kernel_size=7,
                      strides=2,
                      padding='same')(inputs)
    x = layers.MaxPool2D(pool_size=(3, 3), strides=2, padding='same')(x)
    x = layers.Conv2D(192,
                      activation='relu',
                      kernel_size=1,
                      strides=1,
                      padding='same')(x)
    x = layers.Conv2D(256,
                      activation='relu',
                      kernel_size=3,
                      strides=1,
                      padding='same')(x)

    x = layers.MaxPool2D(pool_size=(3, 3), strides=2, padding='same')(x)
    x = _inception_block(x, num_filters=512, activation='relu')

    x = _inception_block(x, num_filters=512, activation='relu')

    x = layers.MaxPool2D(pool_size=(3, 3), strides=2, padding='same')(x)
    x = _inception_block(x, num_filters=512, activation='relu')

    out1 = _inception_output(x, num_classes)
    x = _inception_block(x, num_filters=512, activation='relu')

    x = _inception_block(x, num_filters=512, activation='relu')

    x = _inception_block(x, num_filters=512, activation='relu')

    out2 = _inception_output(x, num_classes)
    x = _inception_block(x, num_filters=512, activation='relu')

    x = layers.MaxPool2D(pool_size=(3, 3), strides=2, padding='same')(x)
    x = _inception_block(x, num_filters=512, activation='relu')

    x = _inception_block(x, num_filters=512, activation='relu')

    x = layers.AvgPool2D(pool_size=(7, 7), strides=1, padding='valid')(x)
    x = tf.keras.layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    out3 = layers.Dense(num_classes, activation='softmax')(x)

    model = tf.keras.Model(inputs=inputs,
                           outputs=[out1, out2, out3],
                           name='inception')
    return model
Code Example #25
    def __init__(self, out_dim, f=0.25):
        super(TCResNet8, self).__init__()

        # constants
        FIRST_CONV_KERNEL = [3, 1]
        ANYOTHER_CONV_KERNEL = [9, 1]
        SHORTCUT_CONV_KERNEL = [1, 1]
        L2_REGULIZER_VAL = 1e-4
        DROP_PROBABILITY = 0.5

        # begin
        self.conv1 = l.Conv2D(int(16 * f), FIRST_CONV_KERNEL, strides=1, padding='same',
                              use_bias=False)

        # for block1:
        self.conv2 = l.Conv2D(filters=int(24 * f), kernel_size=ANYOTHER_CONV_KERNEL, strides=2, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn1_1 = l.BatchNormalization(scale=True, trainable=True)
        self.activ1_1 = l.Activation('relu')
        self.conv3 = l.Conv2D(int(24 * f), ANYOTHER_CONV_KERNEL, strides=1, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn1_2 = l.BatchNormalization(scale=True, trainable=True)
        self.conv4 = l.Conv2D(int(24 * f), SHORTCUT_CONV_KERNEL, strides=2, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn1_1_1 = l.BatchNormalization(scale=True, trainable=True)
        self.activ1_1_1 = l.Activation('relu')
        self.activ1_2 = l.Activation('relu')

        # for block2:
        self.conv5 = l.Conv2D(int(32 * f), ANYOTHER_CONV_KERNEL, strides=2, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn2_1 = l.BatchNormalization(scale=True, trainable=True)
        self.activ2_1 = l.Activation('relu')
        self.conv6 = l.Conv2D(int(32 * f), ANYOTHER_CONV_KERNEL, strides=1, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn2_2 = l.BatchNormalization(scale=True, trainable=True)
        self.conv7 = l.Conv2D(int(32 * f), SHORTCUT_CONV_KERNEL, strides=2, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn2_1_1 = l.BatchNormalization()
        self.activ2_1_1 = l.Activation('relu')
        self.activ2_2 = l.Activation('relu')

        # for block3:
        self.conv8 = l.Conv2D(int(48 * f), ANYOTHER_CONV_KERNEL, strides=2, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn3_1 = l.BatchNormalization(scale=True, trainable=True)
        self.activ3_1 = l.Activation('relu')
        self.conv9 = l.Conv2D(int(48 * f), ANYOTHER_CONV_KERNEL, strides=1, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn3_2 = l.BatchNormalization(scale=True, trainable=True)
        self.conv10 = l.Conv2D(int(48 * f), SHORTCUT_CONV_KERNEL, strides=2, padding='same',
                              use_bias=False, kernel_initializer="he_normal",
                              kernel_regularizer=reg.l2(L2_REGULIZER_VAL))
        self.bn3_1_1 = l.BatchNormalization()
        self.activ3_1_1 = l.Activation('relu')
        self.activ3_2 = l.Activation('relu')

        # avg pooling 2D
        self.avg_pool = l.AvgPool2D(strides=1)

        self.out_block = tf.keras.Sequential([
            l.Flatten(),
            l.Activation('relu'),
            l.Dropout(DROP_PROBABILITY),
            l.Dense(int(46 * f)),
            l.Dense(out_dim),
            l.Activation('softmax')]
        )
Code Example #26
def InceptionV1(im_height=224, im_width=224, class_num=1000, aux_logits=False):
    # TensorFlow tensors use NHWC channel ordering
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")
    # (None, 224, 224, 3)
    x = layers.Conv2D(64, kernel_size=7, strides=2, padding="SAME", use_bias=False, name="conv1/conv")(input_image)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name="conv1/bn")(x)
    x = layers.ReLU()(x)
    # (None, 112, 112, 64)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_1")(x)
    # (None, 56, 56, 64)
    x = layers.Conv2D(64, kernel_size=1, use_bias=False, name="conv2/conv")(x)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name="conv2/bn")(x)
    x = layers.ReLU()(x)
    # (None, 56, 56, 64)
    x = layers.Conv2D(192, kernel_size=3, padding="SAME", use_bias=False, name="conv3/conv")(x)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5, name="conv3/bn")(x)
    x = layers.ReLU()(x)
    # (None, 56, 56, 192)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_2")(x)

    # (None, 28, 28, 192)
    x = Inception(64, 96, 128, 16, 32, 32, name="inception3a")(x)
    # (None, 28, 28, 256)
    x = Inception(128, 128, 192, 32, 96, 64, name="inception3b")(x)

    # (None, 28, 28, 480)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_3")(x)
    # (None, 14, 14, 480)
    x = Inception(192, 96, 208, 16, 48, 64, name="inception4a")(x)
    if aux_logits:
        aux1 = InceptionAux(class_num, name="aux1")(x)

    # (None, 14, 14, 512)
    x = Inception(160, 112, 224, 24, 64, 64, name="inception4b")(x)
    # (None, 14, 14, 512)
    x = Inception(128, 128, 256, 24, 64, 64, name="inception4c")(x)
    # (None, 14, 14, 512)
    x = Inception(112, 144, 288, 32, 64, 64, name="inception4d")(x)
    if aux_logits:
        aux2 = InceptionAux(class_num, name="aux2")(x)

    # (None, 14, 14, 528)
    x = Inception(256, 160, 320, 32, 128, 128, name="inception4e")(x)
    # (None, 14, 14, 832)
    x = layers.MaxPool2D(pool_size=2, strides=2, padding="SAME", name="maxpool_4")(x)

    # (None, 7, 7, 832)
    x = Inception(256, 160, 320, 32, 128, 128, name="inception5a")(x)
    # (None, 7, 7, 832)
    x = Inception(384, 192, 384, 48, 128, 128, name="inception5b")(x)
    # (None, 7, 7, 1024)
    x = layers.AvgPool2D(pool_size=7, strides=1, name="avgpool_1")(x)

    # (None, 1, 1, 1024)
    x = layers.Flatten(name="output_flatten")(x)
    # (None, 1024)
    x = layers.Dropout(rate=0.4, name="output_dropout")(x)
    x = layers.Dense(class_num, name="fc")(x)
    # (None, class_num)
    aux3 = layers.Softmax()(x)

    if aux_logits:
        model = models.Model(inputs=input_image, outputs=[aux1, aux2, aux3])
    else:
        model = models.Model(inputs=input_image, outputs=aux3)
    return model
Code Example #27
def GoogLeNet(img_height=224, img_width=224, class_num=1000, aux_logits=False):
    """
    GoogLeNet网络
    :param img_height:
    :param img_width:
    :param class_num:
    :param aux_logits:
    :return:
    """
    input_image = layers.Input(shape=(img_height, img_width, 3),
                               dtype='float32')
    # (None, 224, 224, 3)
    x = layers.Conv2D(64,
                      kernel_size=7,
                      strides=2,
                      padding='same',
                      use_bias=False,
                      name='conv1/conv')(input_image)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5,
                                  name='conv1/bn')(x)
    x = layers.ReLU()(x)
    # (None, 112, 112, 64)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding='same',
                         name='maxpool_1')(x)
    # (None, 56, 56, 64)

    x = layers.Conv2D(64, kernel_size=1, use_bias=False, name='conv2/conv')(x)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5,
                                  name='conv2/bn')(x)
    x = layers.ReLU()(x)
    # (None, 56, 56, 64)
    x = layers.Conv2D(192,
                      kernel_size=3,
                      padding='same',
                      use_bias=False,
                      name='conv3/conv')(x)
    x = layers.BatchNormalization(momentum=0.9, epsilon=1e-5,
                                  name='conv3/bn')(x)
    x = layers.ReLU()(x)
    # (None, 56, 56, 192)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding='same',
                         name='maxpool_2')(x)

    # (None, 28, 28, 192)
    x = Inception(64, 96, 128, 16, 32, 32, name='inception3a')(x)
    # (None, 28, 28, 256)
    x = Inception(128, 128, 192, 32, 96, 64, name='inception3b')(x)

    # (None, 28, 28, 480)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding='same',
                         name='maxpool_3')(x)
    # (None, 14, 14, 480)
    x = Inception(192, 96, 208, 16, 48, 64, name='inception4a')(x)
    if aux_logits:
        aux1 = InceptionAux(class_num, name='aux1')(x)

    # (None, 14, 14, 512)
    x = Inception(160, 112, 224, 24, 64, 64, name='inception4b')(x)
    # (None, 14, 14, 512)
    x = Inception(128, 128, 256, 24, 64, 64, name='inception4c')(x)
    # (None, 14, 14, 512)
    x = Inception(112, 144, 288, 32, 64, 64, name='inception4d')(x)
    if aux_logits:
        aux2 = InceptionAux(class_num, name='aux2')(x)

    # (None, 14, 14, 528)
    x = Inception(256, 160, 320, 32, 128, 128, name='inception4e')(x)
    # (None, 14, 14, 832)
    x = layers.MaxPool2D(pool_size=2,
                         strides=2,
                         padding='same',
                         name='maxpool_4')(x)

    # (None, 7, 7, 832)
    x = Inception(256, 160, 320, 32, 128, 128, name='inception5a')(x)
    # (None, 7, 7, 832)
    x = Inception(384, 192, 384, 48, 128, 128, name='inception5b')(x)
    # (None, 7, 7, 1024)
    x = layers.AvgPool2D(pool_size=7, strides=1, name='avgpool_1')(x)

    # (None, 1, 1, 1024)
    x = layers.Flatten(name='output_flatten')(x)
    # (None, 1024)
    x = layers.Dropout(rate=0.4, name='output_dropout')(x)
    x = layers.Dense(class_num, name='fc')(x)
    # (None, num_class)
    aux3 = layers.Softmax()(x)

    if aux_logits:
        model = models.Model(inputs=input_image, outputs=[aux1, aux2, aux3])
    else:
        model = models.Model(inputs=input_image, outputs=aux3)
    return model
Code Example #28
def Model_PSP(pre_trained_model, num_classes=35):
    last_pretrained_layer = pre_trained_model.get_layer('conv3_block4_out')
    last_output = last_pretrained_layer.output
    last_output = layers.Conv2D(filters=128,
                                kernel_size=(1, 1),
                                name='Compress_out')(last_output)

    #Define the params for the pooling module
    #This has to be 1/4 times the input channel depth
    INPUT_CHANNEL_DEPTH = 128
    INPUT_DIM = 32
    TARGET_CHANNEL_DEPTH = INPUT_CHANNEL_DEPTH // 4
    Y_KERNEL_DIM = (INPUT_DIM // 2, INPUT_DIM // 2)
    B_KERNEL_DIM = (INPUT_DIM // 4, INPUT_DIM // 4)
    G_KERNEL_DIM = (INPUT_DIM // 8, INPUT_DIM // 8)
    #Now we define the pyramidal pooling architecture
    base = last_output
    #Define the GAP with 1*1 block size for 1x1 bin
    red_blk = layers.GlobalAvgPool2D(name='red_block_pooling')(base)
    red_blk = layers.Reshape((1, 1, INPUT_CHANNEL_DEPTH))(red_blk)
    red_blk = layers.Conv2D(filters=TARGET_CHANNEL_DEPTH,
                            kernel_size=(1, 1),
                            name='red_1x1_conv')(red_blk)
    red_blk = layers.UpSampling2D(size=(256, 256),
                                  interpolation='bilinear',
                                  name='red_upsample')(red_blk)

    #Define the average pooling for the yellow block for 2x2 bin
    y_blk = layers.AvgPool2D(pool_size=Y_KERNEL_DIM,
                             name='yellow_blk_pooling')(base)
    y_blk = layers.Conv2D(filters=TARGET_CHANNEL_DEPTH,
                          kernel_size=(1, 1),
                          name='yellow_1x1_conv')(y_blk)
    y_blk = layers.UpSampling2D(size=(128, 128),
                                interpolation='bilinear',
                                name='yellow_upsample')(y_blk)

    #Define the average pooling for the blue block for 4x4 bin
    blue_blk = layers.AvgPool2D(pool_size=B_KERNEL_DIM,
                                name='blue_blk_pooling')(base)
    blue_blk = layers.Conv2D(filters=TARGET_CHANNEL_DEPTH,
                             kernel_size=(1, 1),
                             name='blue_1x1_conv')(blue_blk)
    blue_blk = layers.UpSampling2D(size=(64, 64),
                                   interpolation='bilinear',
                                   name='blue_upsample')(blue_blk)

    #Define the average pooling for the green block for 8x8 bins
    green_blk = layers.AvgPool2D(pool_size=G_KERNEL_DIM,
                                 name='green_blk_pooling')(base)
    green_blk = layers.Conv2D(filters=TARGET_CHANNEL_DEPTH,
                              kernel_size=(1, 1),
                              name='green_1x1_conv')(green_blk)
    green_blk = layers.UpSampling2D(size=(32, 32),
                                    interpolation='bilinear',
                                    name='green_upsample')(green_blk)

    #Now we upsample the base and check all output shapes to ensure that they match
    base = layers.UpSampling2D(size=(256 // INPUT_DIM, 256 // INPUT_DIM),
                               interpolation='bilinear',
                               name='base_upsample')(base)
    print(base.shape)
    print(red_blk.shape)
    print(y_blk.shape)
    print(blue_blk.shape)
    print(green_blk.shape)

    #Generate the final output and check shape
    PPM = tf.keras.layers.concatenate(
        [base, green_blk, blue_blk, y_blk, red_blk])
    print(PPM.shape)

    #Now we define the final convolutional block
    output = layers.Conv2D(filters=num_classes,
                           kernel_size=(3, 3),
                           padding='same',
                           name='final_3x3_conv_blk',
                           activation='softmax')(PPM)
    return output
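
Note that Model_PSP returns an output tensor rather than a compiled model. A hypothetical way to wrap it, assuming a ResNet50 backbone (which exposes a conv3_block4_out layer and gives the expected 32x32 feature map for a 256x256 input):

import tensorflow as tf

backbone = tf.keras.applications.ResNet50(include_top=False, input_shape=(256, 256, 3))
seg_output = Model_PSP(backbone, num_classes=35)
psp_model = tf.keras.Model(inputs=backbone.input, outputs=seg_output)
psp_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
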
Code Example #29
def build_generator():
    input = tf.keras.Input(shape=(L_node, W_node,
                                  Time_steps))  # (None, 256, 256, 5)

    l1 = input  # (None, 256, 256, 5)

    l2_1 = layers.Conv2D(64, 1, 1, 'same',
                         activation='relu')(l1)  # (None, 256, 256, 64)
    l2_1 = layers.BatchNormalization()(l2_1)

    l2_2 = layers.Conv2D(48, 1, 1, 'same',
                         activation='relu')(l1)  # (None, 256, 256, 48)
    l2_2 = layers.BatchNormalization()(l2_2)
    l2_2 = layers.Conv2D(64, 3, 1, 'same',
                         activation='relu')(l2_2)  # (None, 256, 256, 64)
    l2_2 = layers.BatchNormalization()(l2_2)

    l2_3 = layers.Conv2D(48, 1, 1, 'same',
                         activation='relu')(l1)  # (None, 256, 256, 48)
    l2_3 = layers.BatchNormalization()(l2_3)
    l2_3 = layers.Conv2D(64, 5, 1, 'same',
                         activation='relu')(l2_3)  # (None, 256, 256, 64)
    l2_3 = layers.BatchNormalization()(l2_3)

    l2_4 = layers.AvgPool2D(3, 1, 'same')(l1)
    l2_4 = layers.Conv2D(64, 1, 1, 'same',
                         activation='relu')(l2_4)  # (None, 256, 256, 64)
    l2_4 = layers.BatchNormalization()(l2_4)

    l2 = layers.concatenate([l2_1, l2_2, l2_3, l2_4],
                            3)  # (None, 256, 256, 256)

    l3 = layers.Conv2D(64, 3, 1, 'same',
                       activation='relu')(l2)  # (None, 256, 256, 64)

    l4_1 = layers.Conv2D(128, 3, 2, 'same')(l3)  # (None, 128, 128, 128)
    l4_1 = layers.BatchNormalization()(l4_1)
    l4_1 = layers.LeakyReLU(0.2)(l4_1)

    l5_1 = layers.Conv2D(256, 3, 2, 'same')(l4_1)  # (None, 64, 64, 256)
    l5_1 = layers.BatchNormalization()(l5_1)
    l5_1 = layers.LeakyReLU(0.2)(l5_1)

    l6_1 = layers.Conv2D(512, 3, 2, 'same')(l5_1)  # (None, 32, 32, 512)
    l6_1 = layers.BatchNormalization()(l6_1)
    l6_1 = layers.LeakyReLU(0.2)(l6_1)

    l7_1 = layers.Conv2D(512, 3, 2, 'same')(l6_1)  # (None, 16, 16, 512)
    l7_1 = layers.BatchNormalization()(l7_1)
    l7_1 = layers.LeakyReLU(0.2)(l7_1)

    l8_1 = layers.Conv2DTranspose(512, 3, 2,
                                  'same')(l7_1)  # (None, 32, 32, 512)
    l8_1 = layers.BatchNormalization()(l8_1)
    l8_1 = layers.Dropout(0.3)(l8_1)
    l8_1 = layers.Activation('relu')(l8_1)

    l9_1 = layers.Conv2DTranspose(256, 3, 2,
                                  'same')(l8_1)  # (None, 64, 64, 256)
    l9_1 = layers.BatchNormalization()(l9_1)
    l9_1 = layers.Dropout(0.3)(l9_1)
    l9_1 = layers.Activation('relu')(l9_1)

    l10_1 = layers.Conv2DTranspose(128, 3, 2,
                                   'same')(l9_1)  # (None, 128, 128, 128)
    l10_1 = layers.BatchNormalization()(l10_1)
    l10_1 = layers.Dropout(0.3)(l10_1)
    l10_1 = layers.Activation('relu')(l10_1)

    l11_1 = layers.Conv2DTranspose(64, 3, 2,
                                   'same')(l10_1)  # (None, 256, 256, 64)
    l11_1 = layers.BatchNormalization()(l11_1)
    l11_1 = layers.Dropout(0.3)(l11_1)
    l11_1 = layers.Activation('tanh')(l11_1)

    # l4_2 = layers.Conv2D(32, 3, 2, 'same')(l3)  # (None, 128, 128, 32)

    l5_2 = layers.Conv2D(5, 3, 2, 'same')(l3)  # (None, 256, 256, 5)
    l5_2_1 = tf.reshape(l5_2[:, :, :, 0],
                        shape=(-1, 1, LSTM_layer, LSTM_layer, 1))
    l5_2_2 = tf.reshape(l5_2[:, :, :, 1],
                        shape=(-1, 1, LSTM_layer, LSTM_layer, 1))
    l5_2_3 = tf.reshape(l5_2[:, :, :, 2],
                        shape=(-1, 1, LSTM_layer, LSTM_layer, 1))
    l5_2_4 = tf.reshape(l5_2[:, :, :, 3],
                        shape=(-1, 1, LSTM_layer, LSTM_layer, 1))
    l5_2_5 = tf.reshape(l5_2[:, :, :, 4],
                        shape=(-1, 1, LSTM_layer, LSTM_layer, 1))
    l5_2 = layers.concatenate([l5_2_1, l5_2_2, l5_2_3, l5_2_4, l5_2_5],
                              1)  # (None, 5, 256, 256, 1)

    l6_2 = layers.ConvLSTM2D(5, 3, 1, 'same', return_sequences=True)(
        l5_2)  # (None, 5, 256, 256, 5)

    l7_2 = layers.ConvLSTM2D(10, 3, 1, 'same', return_sequences=True)(
        l6_2)  # (None, 5, 256, 256, 10)

    # l8_2 = layers.ConvLSTM2D(10, 3, 1, 'same', return_sequences=True)(l7_2)  # (None, 5, 256, 256, 20)

    l9_2 = layers.ConvLSTM2D(5, 3, 1, 'same', return_sequences=False)(
        l7_2)  # (None, 256, 256, 5)

    # l10_2 = layers.Conv2DTranspose(32, 3, 2, 'same')(l9_2)  # (None, 128, 128, 32)
    # l10_2 = layers.BatchNormalization()(l10_2)
    # l10_2 = layers.Dropout(0.3)(l10_2)
    # l10_2 = layers.Activation('relu')(l10_2)

    l11_2 = layers.Conv2DTranspose(64, 3, 2,
                                   'same')(l9_2)  # (None, 256, 256, 64)
    l11_2 = layers.BatchNormalization()(l11_2)
    l11_2 = layers.Dropout(0.3)(l11_2)
    l11_2 = layers.Activation('relu')(l11_2)

    # l11_2 = layers.Conv2D(64, 5, 1, 'same', activation='relu')(l9_2)

    l12 = layers.add([l11_1, l11_2])  # (None, 256, 256, 64)

    l13 = layers.Conv2D(1, 3, 1, 'same',
                        activation='tanh')(l12)  # (None, 256, 256, 1)

    Model = tf.keras.Model(input, l13, name='generator')
    return Model
Code Example #30
                  kernel_size=(3, 3),
                  strides=1))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Dropout(0.5))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Dropout(0.5))
model.add(layers.AvgPool2D())
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Dropout(0.5))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Dropout(0.5))
model.add(layers.MaxPool2D())
# Group - 2
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Dropout(0.5))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1))