Example #1
0
    def olliNetwork(self):
        """Build a small CNN for 48x48 single-channel input, ending in a
        3-way softmax head; stores the result in ``self.model``."""
        net = models.Sequential()

        # Stage 1: two 5x5 convolutions, overlapping 3x3/stride-2 pooling.
        net.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=(48, 48, 1)))
        net.add(layers.Conv2D(64, (5, 5), activation='relu'))
        net.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        net.add(layers.Dropout(0.25))

        # Stage 2: same layout as stage 1 (minus the input spec).
        for _ in range(2):
            net.add(layers.Conv2D(64, (5, 5), activation='relu'))
        net.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        net.add(layers.Dropout(0.25))

        # Stage 3: wider 4x4 convolution with non-overlapping pooling.
        net.add(layers.Conv2D(128, (4, 4), activation='relu'))
        net.add(layers.MaxPooling2D(pool_size=(2, 2)))
        net.add(layers.Dropout(0.25))

        # Classifier head.
        net.add(layers.Flatten())
        net.add(layers.Dense(3072, activation='relu'))
        net.add(layers.Dropout(0.5))
        net.add(layers.Dense(128, activation='relu'))
        net.add(layers.Dropout(0.5))
        net.add(layers.Dense(3, activation='softmax'))

        self.model = net
def VGG6(inputs, n_class=10):
    """Small VGG-style classifier head over *inputs*.

    Two conv blocks (32 then 64 filters, each: two same-padded 3x3 ReLU
    convs + 2x2 max-pool), then two 512-unit dense layers and an
    ``n_class``-way softmax. Returns the prediction tensor.
    """
    def conv_block(tensor, filters, block):
        # Two named 3x3 ReLU convolutions followed by a named 2x2 pool.
        for i in (1, 2):
            tensor = layers.Conv2D(filters, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   name='block%d_conv%d' % (block, i))(tensor)
        return layers.MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block%d_pool' % block)(tensor)

    x = conv_block(inputs, 32, 1)  # Block 1
    x = conv_block(x, 64, 2)       # Block 2

    x = layers.Flatten(name='flatten')(x)
    x = layers.Dense(512, activation='relu', name='fc1')(x)
    features = layers.Dense(512, activation='relu', name='fc2')(x)
    return layers.Dense(n_class, activation='softmax',
                        name='predictions')(features)
Example #3
0
    def model_definition(self):
        """Define and compile the CNN.

        Builds a Sequential network on ``self.input_shape``: three
        conv/pool/dropout stages followed by a 3-way softmax head, then
        compiles it with the Adamax optimizer and categorical
        cross-entropy loss, tracking accuracy.
        """
        self.model = models.Sequential()

        # Stage 1: two 5x5 convs, overlapping 3x3/stride-2 pooling.
        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=self.input_shape))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        # Stage 2: same layout as stage 1.
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        # Stage 3: 3x3 conv, average pool, 1x1 bottleneck, max pool.
        self.model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        self.model.add(layers.AveragePooling2D())
        self.model.add(layers.Conv2D(128, (1, 1), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        # Classifier head.
        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))

        # The optimizer is Adamax, not Adam — the old local name `adam`
        # was misleading.
        adamax = optimizers.Adamax()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=adamax,
                           metrics=['acc'])
Example #4
0
    def create_model(self, img_shape, num_class):
        """Build a vanilla U-Net over an ``img_shape`` input.

        Four down-sampling stages (32..256 filters), a 512-filter
        bottleneck, and four up-sampling stages with cropped skip
        connections, finished by zero-padding back to the input size and
        a ``num_class``-channel 1x1 convolution. Returns the keras Model.
        """
        concat_axis = 3
        inputs = layers.Input(shape=img_shape)

        def twin_conv(tensor, filters, first_name=None):
            # Two same-padded 3x3 ReLU convolutions; only the very first
            # conv of the network carries an explicit name.
            tensor = layers.Conv2D(filters, (3, 3), activation='relu',
                                   padding='same', name=first_name)(tensor)
            return layers.Conv2D(filters, (3, 3), activation='relu',
                                 padding='same')(tensor)

        def up_merge(tensor, skip, filters):
            # Upsample, crop the skip connection to match, concatenate
            # on channels, then refine with a double convolution.
            up = layers.UpSampling2D(size=(2, 2))(tensor)
            ch, cw = self.get_crop_shape(skip, up)
            cropped = layers.Cropping2D(cropping=(ch, cw))(skip)
            merged = layers.concatenate([up, cropped], axis=concat_axis)
            return twin_conv(merged, filters)

        # Contracting path.
        conv1 = twin_conv(inputs, 32, first_name='conv1_1')
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = twin_conv(pool1, 64)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)
        conv3 = twin_conv(pool2, 128)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)
        conv4 = twin_conv(pool3, 256)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        # Bottleneck.
        conv5 = twin_conv(pool4, 512)

        # Expanding path with skip connections.
        conv6 = up_merge(conv5, conv4, 256)
        conv7 = up_merge(conv6, conv3, 128)
        conv8 = up_merge(conv7, conv2, 64)
        conv9 = up_merge(conv8, conv1, 32)

        # Pad back to the input spatial size, then per-pixel scores
        # (no final activation).
        ch, cw = self.get_crop_shape(inputs, conv9)
        padded = layers.ZeroPadding2D(
            padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
        conv10 = layers.Conv2D(num_class, (1, 1))(padded)

        model = models.Model(inputs=inputs, outputs=conv10)

        return model
Example #5
0
File: model2.py  Project: 7LFB/Typhoon
    def build_unet(self):
        """Assemble a U-Net over ``self.model_input`` and store the
        2-channel sigmoid prediction tensor in ``self.img_pred``."""

        def twin_conv(tensor, filters, first_name=None):
            # Two same-padded 3x3 ReLU convolutions; only the network's
            # very first conv carries an explicit name.
            tensor = layers.Conv2D(filters, (3, 3), activation='relu',
                                   padding='same', name=first_name)(tensor)
            return layers.Conv2D(filters, (3, 3), activation='relu',
                                 padding='same')(tensor)

        def up_merge(tensor, skip, filters):
            # Upsample, crop the skip connection to match, concatenate
            # on channels, then refine with a double convolution.
            up = layers.UpSampling2D(size=(2, 2))(tensor)
            ch, cw = self.get_crop_shape(skip, up)
            cropped = layers.Cropping2D(cropping=(ch, cw))(skip)
            merged = layers.concatenate([up, cropped], axis=3)
            return twin_conv(merged, filters)

        # Contracting path.
        conv1 = twin_conv(self.model_input, 32, first_name='conv1_1')
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = twin_conv(pool1, 64)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)
        conv3 = twin_conv(pool2, 128)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)
        conv4 = twin_conv(pool3, 256)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        # Bottleneck.
        conv5 = twin_conv(pool4, 512)

        # Expanding path with skip connections.
        conv6 = up_merge(conv5, conv4, 256)
        conv7 = up_merge(conv6, conv3, 128)
        conv8 = up_merge(conv7, conv2, 64)
        conv9 = up_merge(conv8, conv1, 32)

        # Pad back to the input spatial size, then a sigmoid head.
        ch, cw = self.get_crop_shape(self.model_input, conv9)
        conv9 = layers.ZeroPadding2D(
            padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
        conv10 = layers.Conv2D(2, (3, 3), activation='sigmoid',
                               padding='same')(conv9)

        self.img_pred = conv10
Example #6
0
def ShatheNet_v2(n_classes=256, weights=None):
    """DenseNet-style classifier over a fixed 192x192x3 input.

    n_classes: units of the softmax head. weights: optional path to a
    weights file loaded into the model before returning it.
    Returns the (uncompiled) keras Model.
    """
    inputs = layers.Input(shape=(192, 192, 3))

    # Stem: strided conv, 1x1 bottleneck, 3x3 conv, then max-pooling.
    x = conv2d_bn(inputs, 32, 3, 3, padding='valid', strides=(2, 2))
    x = conv2d_bn(x, 64, 1, 1, padding='valid', strides=(1, 1))
    x = conv2d_bn(x, 64, 3, 3, padding='valid', strides=(1, 1))
    x = layers.MaxPooling2D((2, 2))(x)

    # Alternating dense blocks (growth 32) and transition blocks.
    for block_layers, trans_filters in ((8, 96), (12, 128), (20, 196)):
        x = dense_block(x, block_layers, 32)
        x = transition_block(x, trans_filters)
    x = dense_block(x, 16, 32)

    # Global pooling instead of flattening before the softmax head.
    x = layers.GlobalAveragePooling2D()(x)
    predictions = layers.Dense(n_classes, activation='softmax')(x)
    model = models.Model(inputs=inputs, outputs=predictions)

    if weights:
        model.load_weights(weights)
    return model
Example #7
0
def classifier_model():
    """Build the CNN classifier.

    Three single-filter conv stages (kernels 2x10, 2x6, 2x3) with 2x2
    max-pooling after the first two, flattened into one linear output
    unit. Prints the model summary before returning the model.
    """
    model = models.Sequential()

    # Stage 1: single-filter 2x10 conv over the 40x44x1 input.
    model.add(layers.Conv2D(1, [2, 10], input_shape=(40, 44, 1),
                            strides=(1, 1), padding='valid',
                            activation='relu',
                            data_format='channels_last'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=None,
                                  padding='valid', data_format=None))

    # Stage 2: single-filter 2x6 conv.
    model.add(layers.Conv2D(1, [2, 6], strides=(1, 1), padding='valid',
                            activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=None,
                                  padding='valid', data_format=None))

    # Stage 3: single-filter 2x3 conv (no pooling afterwards).
    model.add(layers.Conv2D(1, [2, 3], strides=(1, 1), padding='valid',
                            activation='relu'))

    # Flatten into a single linear regression/score unit.
    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    print(model.summary())
    return model
Example #8
0
    def down_block(self, data, f, name):
        """One encoder step: 2x2 max-pool, then a residual conv pair.

        data: input tensor; f: base filter count; name: suffix used for
        the inner layer names. Returns ``(bn, act)`` — the merged
        pre-activation tensor and the same tensor after ReLU, so callers
        can use either as a skip connection.
        """
        x = layers.MaxPooling2D(pool_size=(2,2), strides=(2,2))(data)
        # Expand to 2*f channels, then project back to f channels before
        # the residual merge.
        temp = self.conv_bn_relu(x, (3, 3), (1, 1),
                            2*f, 'layer2_{}'.format(name))
        bn = layers.BatchNormalization(momentum=0.99, name='layer3_bn_{}'.format(name))(self.conv(temp, (3, 3), (1, 1), f, 'layer3_{}'.format(
            name)))

        # Residual shortcut from the pooled input onto the conv path
        # (self.shortcut handles any channel mismatch).
        bn = self.shortcut(x,bn)

        act = layers.Activation('relu')(bn)
        return bn, act
Example #9
0
def Attention_ResUNet_PA(dropout_rate=0.0, batch_norm=True):
    '''
    Residual UNet construction, with attention gates (channel SE block
    + spatial attention) applied to the skip connections.
    convolution: 3*3 SAME padding
    pooling: 2*2 VALID padding
    upsampling: 3*3 VALID padding
    final convolution: 1*1
    :param dropout_rate: FLAG & RATE of dropout.
            if < 0 dropout cancelled, if > 0 set as the rate
    :param batch_norm: flag of if batch_norm used,
            if True batch normalization
    :return: the assembled keras Model
    '''
    # Network input; INPUT_SIZE / INPUT_CHANNEL are module-level
    # constants defined elsewhere in the file.
    inputs = layers.Input((INPUT_SIZE, INPUT_SIZE, INPUT_CHANNEL),
                          dtype=tf.float32)
    # Channel axis for concatenation / normalization (channels-last).
    axis = 3

    # Downsampling path: each DownRes halves the spatial size with a
    # 2x2 max-pool and doubles the filter count.
    # DownRes 1, double residual convolution + pooling
    conv_128 = double_conv_layer(inputs, FILTER_SIZE, FILTER_NUM, dropout_rate,
                                 batch_norm)
    pool_64 = layers.MaxPooling2D(pool_size=(2, 2))(conv_128)
    # DownRes 2
    conv_64 = double_conv_layer(pool_64, FILTER_SIZE, 2 * FILTER_NUM,
                                dropout_rate, batch_norm)
    pool_32 = layers.MaxPooling2D(pool_size=(2, 2))(conv_64)
    # DownRes 3
    conv_32 = double_conv_layer(pool_32, FILTER_SIZE, 4 * FILTER_NUM,
                                dropout_rate, batch_norm)
    pool_16 = layers.MaxPooling2D(pool_size=(2, 2))(conv_32)
    # DownRes 4
    conv_16 = double_conv_layer(pool_16, FILTER_SIZE, 8 * FILTER_NUM,
                                dropout_rate, batch_norm)
    pool_8 = layers.MaxPooling2D(pool_size=(2, 2))(conv_16)
    # DownRes 5 (bottleneck), convolution only
    conv_8 = double_conv_layer(pool_8, FILTER_SIZE, 16 * FILTER_NUM,
                               dropout_rate, batch_norm)

    # Upsampling layers

    # UpRes 6, attention gated concatenation + upsampling + double residual convolution
    # channel attention block
    # NOTE(review): SE_block and attention_block are both given
    # name='att_16' here (and likewise 'att_32'/'att_64'/'att_128'
    # below) — if these become keras layer names, duplicates may be
    # rejected; verify against the helpers' implementations.
    se_conv_16 = SE_block(conv_16,
                          out_dim=8 * FILTER_NUM,
                          ratio=SE_RATIO,
                          name='att_16')
    # spatial attention block, gated by the coarser bottleneck features
    gating_16 = gating_signal(conv_8, 8 * FILTER_NUM, batch_norm)
    att_16 = attention_block(se_conv_16,
                             gating_16,
                             8 * FILTER_NUM,
                             name='att_16')
    # attention re-weight & concatenate
    up_16 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                data_format="channels_last")(conv_8)
    up_16 = layers.concatenate([up_16, att_16], axis=axis)
    up_conv_16 = double_conv_layer(up_16, FILTER_SIZE, 8 * FILTER_NUM,
                                   dropout_rate, batch_norm)

    # UpRes 7
    # channel attention block
    se_conv_32 = SE_block(conv_32,
                          out_dim=4 * FILTER_NUM,
                          ratio=SE_RATIO,
                          name='att_32')
    # spatial attention block
    gating_32 = gating_signal(up_conv_16, 4 * FILTER_NUM, batch_norm)
    att_32 = attention_block(se_conv_32,
                             gating_32,
                             4 * FILTER_NUM,
                             name='att_32')
    # attention re-weight & concatenate
    up_32 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                data_format="channels_last")(up_conv_16)
    up_32 = layers.concatenate([up_32, att_32], axis=axis)
    up_conv_32 = double_conv_layer(up_32, FILTER_SIZE, 4 * FILTER_NUM,
                                   dropout_rate, batch_norm)

    # UpRes 8
    # channel attention block
    se_conv_64 = SE_block(conv_64,
                          out_dim=2 * FILTER_NUM,
                          ratio=SE_RATIO,
                          name='att_64')
    # spatial attention block
    gating_64 = gating_signal(up_conv_32, 2 * FILTER_NUM, batch_norm)
    att_64 = attention_block(se_conv_64,
                             gating_64,
                             2 * FILTER_NUM,
                             name='att_64')
    # attention re-weight & concatenate
    up_64 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                data_format="channels_last")(up_conv_32)
    up_64 = layers.concatenate([up_64, att_64], axis=axis)
    up_conv_64 = double_conv_layer(up_64, FILTER_SIZE, 2 * FILTER_NUM,
                                   dropout_rate, batch_norm)

    # UpRes 9
    # channel attention block
    se_conv_128 = SE_block(conv_128,
                           out_dim=FILTER_NUM,
                           ratio=SE_RATIO,
                           name='att_128')
    # spatial attention block
    gating_128 = gating_signal(up_conv_64, FILTER_NUM, batch_norm)
    # attention re-weight & concatenate
    att_128 = attention_block(se_conv_128,
                              gating_128,
                              FILTER_NUM,
                              name='att_128')
    up_128 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                 data_format="channels_last")(up_conv_64)
    up_128 = layers.concatenate([up_128, att_128], axis=axis)
    up_conv_128 = double_conv_layer(up_128, FILTER_SIZE, FILTER_NUM,
                                    dropout_rate, batch_norm)

    # Final 1*1 convolution + batch normalization + activation.
    # NOTE(review): the original comment claimed a "sigmoid nonlinear
    # activation", but the code applies ReLU — confirm which is the
    # intended output nonlinearity for OUTPUT_MASK_CHANNEL masks.
    conv_final = layers.Conv2D(OUTPUT_MASK_CHANNEL,
                               kernel_size=(1, 1))(up_conv_128)
    conv_final = layers.BatchNormalization(axis=axis)(conv_final)
    conv_final = layers.Activation('relu')(conv_final)

    # Model integration
    model = models.Model(inputs, conv_final, name="AttentionSEResUNet")
    return model
Example #10
0
def encoder_block(input_tensor, num_filters):
    """Apply a conv block, then downsample with a strided 2x2 max-pool.

    Returns ``(downsampled, features)`` so the caller can keep the
    pre-pool feature map as a skip connection.
    """
    features = conv_block(input_tensor, num_filters)
    downsampled = layers.MaxPooling2D((2, 2), strides=(2, 2))(features)
    return downsampled, features
Example #11
0
    def create_model(self, x_shape, y_shape):
        """Build the paired-cell-inpainting network.

        Two encoders — a source-cell branch over ``x_in`` and a target
        marker branch over ``y_rfp`` — are fused by channel
        concatenation and decoded into a single-channel 'y_gfp' output.
        Returns the two-input keras Model.
        """
        # Model inputs (sizes come from opts.py).
        x_in = layers.Input(shape=x_shape, name='x_in')
        y_rfp = layers.Input(shape=y_shape, name='y_rfp')

        def conv_bn(tensor, filters, tag):
            # Named 3x3 same-padded ReLU conv followed by batch norm.
            out = layers.Conv2D(filters, (3, 3),
                                activation='relu',
                                padding='same',
                                name=tag)(tensor)
            return layers.BatchNormalization()(out)

        # Source cell encoder, first two stages.
        conv1 = conv_bn(x_in, 96, 'conv1_1')
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = conv_bn(pool1, 256, 'conv2_1')
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        # Target marker encoder, first two stages.
        rfpconv1 = conv_bn(y_rfp, 16, 'rfpconv1_1')
        rfppool1 = layers.MaxPooling2D(pool_size=(2, 2))(rfpconv1)
        rfpconv2 = conv_bn(rfppool1, 32, 'rfpconv2_1')
        rfppool2 = layers.MaxPooling2D(pool_size=(2, 2))(rfpconv2)

        # Source cell encoder, last three conv stages.
        conv3 = conv_bn(pool2, 384, 'conv3_1')
        conv4 = conv_bn(conv3, 384, 'conv4_1')
        conv5 = conv_bn(conv4, 256, 'conv5_1')

        # Target marker encoder, last conv stage.
        rfpconv3 = conv_bn(rfppool2, 32, 'rfpconv3_1')

        # Fuse the two encoders along the channel axis.
        merged = layers.Concatenate(axis=-1)([conv5, rfpconv3])

        # Decoder (plain convs, no batch norm), upsampling twice.
        conv6 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same', name='conv6_1')(merged)
        conv7 = layers.Conv2D(384, (3, 3), activation='relu',
                              padding='same', name='conv7_1')(conv6)
        conv8 = layers.Conv2D(384, (3, 3), activation='relu',
                              padding='same', name='conv8_1')(conv7)
        up9 = layers.UpSampling2D(size=(2, 2))(conv8)
        conv9 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same', name='conv9_1')(up9)
        up10 = layers.UpSampling2D(size=(2, 2))(conv9)
        conv10 = layers.Conv2D(96, (3, 3), activation='relu',
                               padding='same', name='conv10_1')(up10)
        # Linear single-channel output head.
        conv10 = layers.Conv2D(1, (1, 1), activation=None,
                               name='y_gfp')(conv10)

        # Paired cell inpainting output.
        return models.Model(inputs=[x_in, y_rfp], outputs=conv10)
Example #12
0
# Reshape flat samples to (N, rows, cols, 1) single-channel images.
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)

###########################################
### Define the model using the Keras API ###
###########################################
model = models.Sequential()

# Add layers to the model.
model.add(
    layers.Conv2D(
        32,
        kernel_size=(5, 5),  # conv layer: depth 32, 5x5 filters
        activation=tf.nn.relu,  # ReLU activation
        input_shape=(img_rows, img_cols, 1)))  # input is one image, e.g. (28, 28, 1)
model.add(layers.MaxPooling2D(pool_size=(2, 2)))  # 2x2 max-pooling
model.add(layers.Conv2D(64, (5, 5), activation=tf.nn.relu))  # second conv layer, shorthand form
model.add(layers.MaxPooling2D(pool_size=(2, 2)))  # pooling layer
model.add(layers.Flatten())  # flatten the pooled output for the dense layers
model.add(layers.Dense(500, activation=tf.nn.relu))  # fully connected layer, 500 units, ReLU
model.add(layers.Dense(10,
                       activation=tf.nn.softmax))  # final 10-way softmax output

# Loss function, optimizer and evaluation metric.
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.SGD(),
              metrics=['accuracy'])

# 自动完成模型的训练过程
model.fit(
    X_train,
Example #13
0
def VanillaUnet(num_class, img_shape):
    """Plain U-Net segmentation network.

    num_class: channel count of the final 1x1 convolution;
    img_shape: shape of the input image tensor.
    Returns the keras Model mapping image -> per-pixel class scores
    (no final activation).
    """
    concat_axis = 3
    inputs = layers.Input(shape=img_shape)

    def twin_conv(tensor, filters, first_name=None):
        # Two same-padded 3x3 ReLU convolutions; only the network's
        # very first conv carries an explicit name.
        tensor = layers.Conv2D(filters, (3, 3), activation='relu',
                               padding='same', name=first_name)(tensor)
        return layers.Conv2D(filters, (3, 3), activation='relu',
                             padding='same')(tensor)

    def up_merge(tensor, skip, filters):
        # Upsample, crop the skip connection to match, concatenate on
        # channels, then refine with a double convolution.
        up = layers.UpSampling2D(size=(2, 2))(tensor)
        ch, cw = get_crop_shape(skip, up)
        cropped = layers.Cropping2D(cropping=(ch, cw))(skip)
        merged = layers.concatenate([up, cropped], axis=concat_axis)
        return twin_conv(merged, filters)

    # Contracting path: blocks 1-4 with pooling between them.
    conv1 = twin_conv(inputs, 32, first_name='conv1_1')
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = twin_conv(pool1, 64)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = twin_conv(pool2, 128)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = twin_conv(pool3, 256)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

    # Bottleneck block 5.
    conv5 = twin_conv(pool4, 512)

    # Expanding path: up-sampling blocks 1-4 with skip connections.
    conv6 = up_merge(conv5, conv4, 256)
    conv7 = up_merge(conv6, conv3, 128)
    conv8 = up_merge(conv7, conv2, 64)
    conv9 = up_merge(conv8, conv1, 32)

    # Pad back to the input spatial size, then per-pixel scores.
    ch, cw = get_crop_shape(inputs, conv9)
    padded = layers.ZeroPadding2D(
        padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
    conv10 = layers.Conv2D(num_class, (1, 1))(padded)

    model = models.Model(inputs=inputs, outputs=conv10)

    return model
Example #14
0
def UNet_PA(dropout_rate=0.0, batch_norm=True):
    '''
    Build the U-Net model used for PACT reconstruction.

    Architecture:
        convolution blocks: 3*3 SAME-padded double convolutions
            (``double_conv_layer``)
        pooling: 2*2 VALID max pooling on the contracting path
        upsampling: nearest-neighbour upsampling + skip concatenation
        final convolution: 1*1, batch-normalized, sigmoid-activated
    :param dropout_rate: FLAG & RATE of dropout.
            if < 0 dropout cancelled, if > 0 set as the rate
    :param batch_norm: flag of if batch_norm used,
            if True batch normalization
    :return: UNet model for PACT recons
    '''
    channel_axis = 3  # channels-last tensors throughout

    # Network input: square image with INPUT_CHANNEL channels.
    net_in = layers.Input((INPUT_SIZE, INPUT_SIZE, INPUT_CHANNEL))

    # --- Contracting path: four double-conv + pool stages ---------------
    conv_128 = double_conv_layer(net_in, FILTER_SIZE, INPUT_SIZE,
                                 dropout_rate, batch_norm)
    pool_64 = layers.MaxPooling2D(pool_size=(2, 2))(conv_128)

    conv_64 = double_conv_layer(pool_64, 2 * FILTER_SIZE, INPUT_SIZE,
                                dropout_rate, batch_norm)
    pool_32 = layers.MaxPooling2D(pool_size=(2, 2))(conv_64)

    conv_32 = double_conv_layer(pool_32, 4 * FILTER_SIZE, INPUT_SIZE,
                                dropout_rate, batch_norm)
    pool_16 = layers.MaxPooling2D(pool_size=(2, 2))(conv_32)

    conv_16 = double_conv_layer(pool_16, 8 * FILTER_SIZE, INPUT_SIZE,
                                dropout_rate, batch_norm)
    pool_8 = layers.MaxPooling2D(pool_size=(2, 2))(conv_16)

    # --- Bottleneck: double convolution only, no pooling ----------------
    conv_8 = double_conv_layer(pool_8, 16 * FILTER_SIZE, INPUT_SIZE,
                               dropout_rate, batch_norm)

    # --- Expanding path: upsample, concatenate skip, double-conv --------
    upsampled = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                    data_format="channels_last")(conv_8)
    merged = layers.concatenate([upsampled, conv_16], axis=channel_axis)
    up_conv_16 = double_conv_layer(merged, 8 * FILTER_SIZE, INPUT_SIZE,
                                   dropout_rate, batch_norm)

    upsampled = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                    data_format="channels_last")(up_conv_16)
    merged = layers.concatenate([upsampled, conv_32], axis=channel_axis)
    up_conv_32 = double_conv_layer(merged, 4 * FILTER_SIZE, INPUT_SIZE,
                                   dropout_rate, batch_norm)

    upsampled = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                    data_format="channels_last")(up_conv_32)
    merged = layers.concatenate([upsampled, conv_64], axis=channel_axis)
    up_conv_64 = double_conv_layer(merged, 2 * FILTER_SIZE, INPUT_SIZE,
                                   dropout_rate, batch_norm)

    upsampled = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                    data_format="channels_last")(up_conv_64)
    merged = layers.concatenate([upsampled, conv_128], axis=channel_axis)
    up_conv_128 = double_conv_layer(merged, FILTER_SIZE, INPUT_SIZE,
                                    dropout_rate, batch_norm)

    # --- Output head: 1*1 conv -> batch norm -> sigmoid -----------------
    head = layers.Conv2D(OUTPUT_MASK_CHANNEL,
                         kernel_size=(1, 1))(up_conv_128)
    head = layers.BatchNormalization(axis=channel_axis)(head)
    mask = layers.Activation('sigmoid')(head)

    return models.Model(net_in, mask, name="UNet")
예제 #15
0
    def _cnn_ctc_init(self):
        """Build the CNN acoustic model and its CTC training wrapper.

        Constructs a VGG-style stack of 3x3 convolutions (32 -> 32 -> 64 ->
        64 -> 128 ... filters) with periodic max pooling and a dropout rate
        that increases with depth, followed by a reshape, two dense layers,
        and a softmax over ``self.OUTPUT_SIZE`` classes.  A second model
        wraps the first with a CTC loss computed by
        ``self._ctc_lambda_func`` and compiles it with Adam.

        Returns:
            tuple: ``(self.cnn_model, self.ctc_model)`` — the prediction
            model (input -> softmax) and the trainable CTC model.
        """
        # Spectrogram-like input of shape (time, feature, 1).
        self.input_data = layers.Input(name='the_input',
                                       shape=(self.AUDIO_LENGTH,
                                              self.AUDIO_FEATURE_LENGTH, 1))

        # Block 1: two 3x3 convs; the first is bias-free (it is the only
        # layer here with use_bias=False), then 2x2 pooling.
        layers_h1 = layers.Conv2D(filters=32,
                                  kernel_size=(3, 3),
                                  use_bias=False,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(
                                      self.input_data)
        layers_h1 = layers.Dropout(rate=0.05)(layers_h1)

        layers_h2 = layers.Conv2D(filters=32,
                                  kernel_size=(3, 3),
                                  use_bias=True,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(layers_h1)

        layers_h3 = layers.MaxPooling2D(pool_size=2,
                                        strides=None,
                                        padding='valid')(layers_h2)
        layers_h3 = layers.Dropout(rate=0.05)(layers_h3)

        # Block 2: two 3x3 convs with 64 filters, then 2x2 pooling.
        layers_h4 = layers.Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  use_bias=True,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(layers_h3)
        layers_h4 = layers.Dropout(rate=0.1)(layers_h4)

        layers_h5 = layers.Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  use_bias=True,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(layers_h4)

        layers_h6 = layers.MaxPooling2D(pool_size=2,
                                        strides=None,
                                        padding='valid')(layers_h5)
        layers_h6 = layers.Dropout(rate=0.1)(layers_h6)

        # Block 3: two 3x3 convs with 128 filters, then 2x2 pooling.
        layers_h7 = layers.Conv2D(filters=128,
                                  kernel_size=(3, 3),
                                  use_bias=True,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(layers_h6)
        layers_h7 = layers.Dropout(rate=0.15)(layers_h7)

        layers_h8 = layers.Conv2D(filters=128,
                                  kernel_size=(3, 3),
                                  use_bias=True,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(layers_h7)

        layers_h9 = layers.MaxPooling2D(pool_size=2,
                                        strides=None,
                                        padding='valid')(layers_h8)
        layers_h9 = layers.Dropout(rate=0.15)(layers_h9)

        # Blocks 4-5: further 128-filter convs; pool_size=1 pooling layers
        # are identity-shaped (no spatial reduction) and appear to be kept
        # for architectural symmetry.
        layers_h10 = layers.Conv2D(filters=128,
                                   kernel_size=(3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(layers_h9)
        layers_h10 = layers.Dropout(rate=0.2)(layers_h10)

        layers_h11 = layers.Conv2D(filters=128,
                                   kernel_size=(3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(layers_h10)

        layers_h12 = layers.MaxPooling2D(pool_size=1,
                                         strides=None,
                                         padding='valid')(layers_h11)

        layers_h12 = layers.Dropout(rate=0.2)(layers_h12)

        layers_h13 = layers.Conv2D(filters=128,
                                   kernel_size=(3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(layers_h12)
        layers_h13 = layers.Dropout(rate=0.2)(layers_h13)

        layers_h14 = layers.Conv2D(filters=128,
                                   kernel_size=(3, 3),
                                   use_bias=True,
                                   activation='relu',
                                   padding='same',
                                   kernel_initializer='he_normal')(layers_h13)

        layers_h15 = layers.MaxPooling2D(pool_size=1,
                                         strides=None,
                                         padding='valid')(layers_h14)

        # Collapse the conv feature maps to a 2-D sequence for the dense
        # layers.  NOTE(review): the target shape
        # (AUDIO_FEATURE_LENGTH, AUDIO_LENGTH * 2) must equal the number of
        # elements after three 2x2 poolings (T/8 * F/8 * 128) — this only
        # works for specific AUDIO_LENGTH / AUDIO_FEATURE_LENGTH values;
        # confirm against the class constants.
        layers_h16 = layers.Reshape(
            (self.AUDIO_FEATURE_LENGTH, self.AUDIO_LENGTH * 2))(layers_h15)
        layers_h16 = layers.Dropout(rate=0.3)(layers_h16)

        layers_h17 = layers.Dense(units=128,
                                  use_bias=True,
                                  activation='relu',
                                  kernel_initializer='he_normal')(layers_h16)
        layers_h17 = layers.Dropout(rate=0.3)(layers_h17)

        # Per-timestep logits over the output vocabulary.
        layers_h18 = layers.Dense(units=self.OUTPUT_SIZE,
                                  use_bias=True,
                                  kernel_initializer='he_normal')(layers_h17)

        y_pred = layers.Activation('softmax', name='activation_0')(layers_h18)

        # Prediction-only model: input spectrogram -> softmax posteriors.
        self.cnn_model = models.Model(inputs=self.input_data, outputs=y_pred)

        # Extra inputs required by the CTC loss: label sequence plus the
        # true (unpadded) lengths of predictions and labels.
        self.labels = layers.Input(name='the_label',
                                   shape=[self.LABEL_SEQUENCE_LENGTH],
                                   dtype='float32')
        self.input_length = layers.Input(name='input_length',
                                         shape=[1],
                                         dtype='int64')
        self.label_length = layers.Input(name='label_length',
                                         shape=[1],
                                         dtype='int64')
        # CTC loss is computed inside a Lambda layer so it can be trained
        # with a dummy "loss" that just passes the layer output through.
        self.loss = layers.Lambda(function=self._ctc_lambda_func,
                                  output_shape=(1, ),
                                  name='ctc')([
                                      y_pred, self.labels, self.input_length,
                                      self.label_length
                                  ])

        self.ctc_model = models.Model(inputs=[
            self.input_data, self.labels, self.input_length, self.label_length
        ],
                                      outputs=self.loss)
        # NOTE(review): epsilon=10e-8 evaluates to 1e-7, not the Adam
        # default 1e-8 — confirm whether 1e-8 was intended.
        optimizer = optimizers.Adam(lr=0.0001,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    decay=0.0,
                                    epsilon=10e-8)
        # The 'ctc' layer already outputs the loss value, so the compile
        # loss simply forwards y_pred.
        self.ctc_model.compile(optimizer=optimizer,
                               loss={
                                   'ctc': lambda y_true, y_pred: y_pred
                               })
        print('[*Info] Create Model Successful, Compiles Model Successful. ')

        return self.cnn_model, self.ctc_model
예제 #16
0
 def max_pool_2(self, input, pool_factor, name='irrelevant'):
     """Apply square max pooling with window side ``pool_factor``.

     The ``name`` parameter is accepted for interface compatibility but
     is not used.
     """
     pooling = keras_ly.MaxPooling2D(pool_size=(pool_factor, pool_factor))
     return pooling(input)
예제 #17
0
    def create_model(self, img_shape, num_class):
        """Build a U-Net with a dilated-convolution bottleneck.

        Contracting path: four blocks of two 3x3 SAME convolutions
        (32/64/128/256 filters) each followed by 2x2 max pooling.
        Bottleneck: stacked dilated 512-filter convolutions (mode
        'cascade' sums the outputs of a chain with dilation 1, 2, 4).
        Expanding path: four upsample + crop-skip-concatenate + double-conv
        blocks, then zero padding back to the input size and a final 1x1
        convolution producing ``num_class`` channels (no activation).

        Args:
            img_shape: input tensor shape, channels-last (H, W, C).
            num_class: number of output channels of the final 1x1 conv.

        Returns:
            keras ``Model`` mapping the input image to the per-class map.
        """

        concat_axis = 3  # channels-last concatenation
        inputs = layers.Input(shape=img_shape)

        # --- Contracting path -------------------------------------------
        conv1 = layers.Conv2D(32, (3, 3),
                              activation='relu',
                              padding='same',
                              name='conv1_1')(inputs)
        conv1 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(pool1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(pool2)
        conv3 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(pool3)
        conv4 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        ## Use dilated convolution
        # Bottleneck with exponentially growing dilation (1, 2, 4).
        # 'mode' is hard-coded to 'cascade'; the 'parallel' (ASPP-style)
        # branch below is currently unreachable.
        x = pool4
        depth = 3  #3 #6
        dilated_layers = []
        mode = 'cascade'

        if mode == 'cascade':
            # Chain the dilated convs and sum every intermediate output.
            for i in range(depth):
                x = layers.Conv2D(512, (3, 3),
                                  activation='relu',
                                  padding='same',
                                  dilation_rate=2**i)(x)
                dilated_layers.append(x)
            conv5 = layers.add(dilated_layers)
        elif mode == 'parallel':  #"Atrous Spatial Pyramid Pooling"
            # All dilated convs read the same input; outputs are summed.
            for i in range(depth):
                dilated_layers.append(
                    layers.Conv2D(512, (3, 3),
                                  activation='relu',
                                  padding='same',
                                  dilation_rate=2**i)(x))
            conv5 = layers.add(dilated_layers)

#conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
#conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

        # --- Expanding path ---------------------------------------------
        # Each block: upsample, crop the skip tensor to match (for odd
        # input sizes the pooled/upsampled maps can differ by a pixel),
        # concatenate, then two 3x3 convolutions.
        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(up6)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(up7)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(up8)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(up9)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(conv9)

        # Pad back to the exact input spatial size, then project to the
        # requested number of classes with a linear 1x1 convolution.
        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0],
                                                               cw[1])))(conv9)
        conv10 = layers.Conv2D(num_class, (1, 1))(conv9)

        model = models.Model(inputs=inputs, outputs=conv10)

        return model