Code example #1
    def create_model(self, img_shape, num_class):

        concat_axis = 3
        inputs = layers.Input(shape=img_shape)

        conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1_1')(inputs)
        conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
        conv10 = layers.Conv2D(num_class, (1, 1))(conv9)

        model = models.Model(inputs=inputs, outputs=conv10)

        return model
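Note: every U-Net snippet on this page calls a get_crop_shape helper that is not included. A minimal sketch of what such a helper typically computes, assuming channels-last tensors and the same layers namespace as the snippets (e.g. from tensorflow.keras import layers); the actual implementations in these projects may differ:

def get_crop_shape(target, refer):
    # width difference (axis 2 for channels-last), split into (left, right)
    cw = int(target.shape[2]) - int(refer.shape[2])
    assert cw >= 0
    cw1, cw2 = cw // 2, cw // 2 + cw % 2
    # height difference (axis 1), split into (top, bottom)
    ch = int(target.shape[1]) - int(refer.shape[1])
    assert ch >= 0
    ch1, ch2 = ch // 2, ch // 2 + ch % 2
    return (ch1, ch2), (cw1, cw2)

The returned (before, after) pairs plug directly into Cropping2D (to shrink a skip tensor) and ZeroPadding2D (to pad the decoder output back to the input size).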
Code example #2
File: model2.py  Project: 7LFB/Typhoon
    def build_unet(self):

        conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1_1')(self.model_input)
        conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
        conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
        conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=3)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=3)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=3)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=3)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

        ch, cw = self.get_crop_shape(self.model_input, conv9)
        conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
        conv10 = layers.Conv2D(2, (3, 3), activation='sigmoid', padding='same')(conv9)

        self.img_pred = conv10
Code example #3
File: test_tpu.py  Project: jzuern/keras-resnet
    def grouped_convolution(y, nb_channels, _strides):
        # `cardinality` comes from the enclosing scope; when it equals 1,
        # this is just a standard convolution
        if cardinality == 1:
            return layers.Conv2D(nb_channels,
                                 kernel_size=(3, 3),
                                 strides=_strides,
                                 padding='same')(y)

        # the channel count must split evenly across the groups
        assert not nb_channels % cardinality
        _d = nb_channels // cardinality

        # in a grouped convolution layer, input and output channels are divided into `cardinality` groups,
        # and convolutions are separately performed within each group
        groups = []
        for j in range(cardinality):
            # bind j as a default argument so each Lambda keeps its own channel slice
            group = layers.Lambda(lambda z, j=j: z[:, :, :, j * _d:j * _d + _d])(y)
            groups.append(
                layers.Conv2D(_d,
                              kernel_size=(3, 3),
                              strides=_strides,
                              padding='same')(group))

        # the grouped convolutional layer concatenates them as the outputs of the layer
        y = layers.concatenate(groups)

        return y
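A minimal usage sketch (hypothetical values; in the source file cardinality is defined in the enclosing ResNeXt block builder):

cardinality = 8  # hypothetical: number of convolution groups (ResNeXt-style)
inputs = layers.Input(shape=(32, 32, 64))
# 64 output channels split into 8 groups of 8 channels each
out = grouped_convolution(inputs, nb_channels=64, _strides=(1, 1))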
Code example #4
def dense_block(x, nb_layers, nb_filter):
    # Try something like concatenating only odd or even nodes to see
    # whether that reduces the feature count much?
    filter_augmentation_step = 4
    concatenation_of_inputs = x
    for i in range(nb_layers):
        next_node = node(concatenation_of_inputs, nb_filter)
        concatenation_of_inputs = layers.concatenate(
            [concatenation_of_inputs, next_node], axis=3)
        nb_filter = nb_filter + filter_augmentation_step

    return concatenation_of_inputs  # towards the transition layer
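The return comment points at a transition layer that is not part of this snippet. In DenseNet-style models a transition layer typically compresses channels with a 1*1 convolution and halves the spatial resolution; a hypothetical sketch, not taken from the source project:

def transition_layer(x, nb_filter):
    # 1x1 convolution to compress channels, then 2x2 average pooling to downsample
    x = layers.Conv2D(nb_filter, (1, 1), activation='relu', padding='same')(x)
    x = layers.AveragePooling2D(pool_size=(2, 2))(x)
    return x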
Code example #5
File: train.py  Project: recogsal/luna16-unet
def decoder_block(input_tensor, concat_tensor, num_filters):
    # upsample with a strided transposed convolution, then fuse the skip connection
    decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)
    decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation('relu')(decoder)
    return decoder
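A minimal usage sketch of the decoder chain with hypothetical encoder tensors (the source project's encoder is not shown):

# Hypothetical encoder for a 128x128 RGB input, channels-last
inputs = layers.Input(shape=(128, 128, 3))
encoder1 = layers.Conv2D(32, (3, 3), padding='same')(inputs)   # 128x128
encoder2 = layers.Conv2D(64, (3, 3), padding='same')(
    layers.MaxPooling2D((2, 2))(encoder1))                     # 64x64
center = layers.Conv2D(128, (3, 3), padding='same')(
    layers.MaxPooling2D((2, 2))(encoder2))                     # 32x32
# each decoder_block doubles the resolution and fuses the matching skip tensor
decoder2 = decoder_block(center, encoder2, 64)     # back to 64x64
decoder1 = decoder_block(decoder2, encoder1, 32)   # back to 128x128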
Code example #6
    def up_block(self, act, bn, f, name):
        x = layers.UpSampling2D(
            size=(2,2), name='upsample_{}'.format(name))(act)

        temp = layers.concatenate([bn, x], axis=1)
        temp = self.conv_bn_relu(temp, (3, 3), (1, 1),
                            2*f, 'layer2_{}'.format(name))
        # convolve first, then batch-normalize the convolution output
        temp = layers.BatchNormalization(
            momentum=0.99, name='layer3_bn_{}'.format(name))(
                self.conv(temp, (3, 3), (1, 1), f, 'layer3_{}'.format(name)))

        #bn = layers.add([bn,x])
        bn = self.shortcut(x, temp)
        act = layers.Activation('relu')(bn)
        return act
Code example #7
def rnn_generator(static_dim, sequence_dim, length, code_dim, kind='LSTM'):
    '''Build an RNN generator that fuses a static vector with a sequence input.'''
    if kind in ('LSTM', 'GRU'):
        rnn = getattr(layers, kind)
    else:
        raise ValueError('no such RNN method "{}"'.format(kind))
    static = layers.Input((static_dim, ))
    repeat = layers.RepeatVector(length)(static)
    sequence = layers.Input((length, sequence_dim))
    sequence_emb = layers.Conv1D(static_dim, 1)(sequence)
    code = layers.concatenate([repeat, sequence_emb])
    emb = rnn(code_dim,
              recurrent_dropout=0.5,
              unroll=True,
              return_sequences=True,
              activation='linear')(code)
    out = layers.Conv1D(sequence_dim, 1)(emb)
    return models.Model([static, sequence], out)
Code example #8
def node(x, nb_filter):
    # Inception-style node: two parallel towers (1x1 -> 3x3 and 1x1 -> 5x5)
    # whose outputs are concatenated along the channel axis

    tower_1 = conv2d_bn(x, nb_filter, 1, 1, padding='same', strides=(1, 1))
    tower_1 = conv2d_bn(tower_1,
                        nb_filter,
                        3,
                        3,
                        padding='same',
                        strides=(1, 1))

    tower_2 = conv2d_bn(x, nb_filter, 1, 1, padding='same', strides=(1, 1))
    tower_2 = conv2d_bn(tower_2,
                        nb_filter,
                        5,
                        5,
                        padding='same',
                        strides=(1, 1))

    output = layers.concatenate([tower_1, tower_2], axis=3)
    return output
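conv2d_bn is not defined in this snippet; the name and argument order match the well-known helper from the Keras applications (convolution, then batch normalization, then ReLU). A minimal sketch under that assumption:

def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1)):
    # no conv bias: BatchNormalization re-centers the activations anyway
    x = layers.Conv2D(filters, (num_row, num_col),
                      strides=strides, padding=padding, use_bias=False)(x)
    x = layers.BatchNormalization(axis=3)(x)
    return layers.Activation('relu')(x)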
Code example #9
def Attention_ResUNet_PA(dropout_rate=0.0, batch_norm=True):
    '''
    Residual U-Net construction with attention gates.
    convolution: 3*3, SAME padding
    pooling: 2*2, VALID padding
    upsampling: by a factor of UP_SAMP_SIZE
    final convolution: 1*1
    :param dropout_rate: flag & rate of dropout;
            0 disables dropout, a positive value is used as the rate
    :param batch_norm: if True, use batch normalization
    :return: model
    '''
    # input data; INPUT_CHANNEL is the image depth
    inputs = layers.Input((INPUT_SIZE, INPUT_SIZE, INPUT_CHANNEL),
                          dtype=tf.float32)
    axis = 3

    # Downsampling layers
    # DownRes 1, double residual convolution + pooling
    conv_128 = double_conv_layer(inputs, FILTER_SIZE, FILTER_NUM, dropout_rate,
                                 batch_norm)
    pool_64 = layers.MaxPooling2D(pool_size=(2, 2))(conv_128)
    # DownRes 2
    conv_64 = double_conv_layer(pool_64, FILTER_SIZE, 2 * FILTER_NUM,
                                dropout_rate, batch_norm)
    pool_32 = layers.MaxPooling2D(pool_size=(2, 2))(conv_64)
    # DownRes 3
    conv_32 = double_conv_layer(pool_32, FILTER_SIZE, 4 * FILTER_NUM,
                                dropout_rate, batch_norm)
    pool_16 = layers.MaxPooling2D(pool_size=(2, 2))(conv_32)
    # DownRes 4
    conv_16 = double_conv_layer(pool_16, FILTER_SIZE, 8 * FILTER_NUM,
                                dropout_rate, batch_norm)
    pool_8 = layers.MaxPooling2D(pool_size=(2, 2))(conv_16)
    # DownRes 5, convolution only
    conv_8 = double_conv_layer(pool_8, FILTER_SIZE, 16 * FILTER_NUM,
                               dropout_rate, batch_norm)

    # Upsampling layers

    # UpRes 6, attention gated concatenation + upsampling + double residual convolution
    # channel attention block
    se_conv_16 = SE_block(conv_16,
                          out_dim=8 * FILTER_NUM,
                          ratio=SE_RATIO,
                          name='att_16')
    # spatial attention block
    gating_16 = gating_signal(conv_8, 8 * FILTER_NUM, batch_norm)
    att_16 = attention_block(se_conv_16,
                             gating_16,
                             8 * FILTER_NUM,
                             name='att_16')
    # attention re-weight & concatenate
    up_16 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                data_format="channels_last")(conv_8)
    up_16 = layers.concatenate([up_16, att_16], axis=axis)
    up_conv_16 = double_conv_layer(up_16, FILTER_SIZE, 8 * FILTER_NUM,
                                   dropout_rate, batch_norm)

    # UpRes 7
    # channel attention block
    se_conv_32 = SE_block(conv_32,
                          out_dim=4 * FILTER_NUM,
                          ratio=SE_RATIO,
                          name='att_32')
    # spatial attention block
    gating_32 = gating_signal(up_conv_16, 4 * FILTER_NUM, batch_norm)
    att_32 = attention_block(se_conv_32,
                             gating_32,
                             4 * FILTER_NUM,
                             name='att_32')
    # attention re-weight & concatenate
    up_32 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                data_format="channels_last")(up_conv_16)
    up_32 = layers.concatenate([up_32, att_32], axis=axis)
    up_conv_32 = double_conv_layer(up_32, FILTER_SIZE, 4 * FILTER_NUM,
                                   dropout_rate, batch_norm)

    # UpRes 8
    # channel attention block
    se_conv_64 = SE_block(conv_64,
                          out_dim=2 * FILTER_NUM,
                          ratio=SE_RATIO,
                          name='att_64')
    # spatial attention block
    gating_64 = gating_signal(up_conv_32, 2 * FILTER_NUM, batch_norm)
    att_64 = attention_block(se_conv_64,
                             gating_64,
                             2 * FILTER_NUM,
                             name='att_64')
    # attention re-weight & concatenate
    up_64 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                data_format="channels_last")(up_conv_32)
    up_64 = layers.concatenate([up_64, att_64], axis=axis)
    up_conv_64 = double_conv_layer(up_64, FILTER_SIZE, 2 * FILTER_NUM,
                                   dropout_rate, batch_norm)

    # UpRes 9
    # channel attention block
    se_conv_128 = SE_block(conv_128,
                           out_dim=FILTER_NUM,
                           ratio=SE_RATIO,
                           name='att_128')
    # spatial attention block
    gating_128 = gating_signal(up_conv_64, FILTER_NUM, batch_norm)
    # attention re-weight & concatenate
    att_128 = attention_block(se_conv_128,
                              gating_128,
                              FILTER_NUM,
                              name='att_128')
    up_128 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                 data_format="channels_last")(up_conv_64)
    up_128 = layers.concatenate([up_128, att_128], axis=axis)
    up_conv_128 = double_conv_layer(up_128, FILTER_SIZE, FILTER_NUM,
                                    dropout_rate, batch_norm)

    # 1*1 convolutional layer
    # valid padding
    # batch normalization
    # ReLU nonlinear activation
    conv_final = layers.Conv2D(OUTPUT_MASK_CHANNEL,
                               kernel_size=(1, 1))(up_conv_128)
    conv_final = layers.BatchNormalization(axis=axis)(conv_final)
    conv_final = layers.Activation('relu')(conv_final)

    # Model integration
    model = models.Model(inputs, conv_final, name="AttentionSEResUNet")
    return model
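SE_block, gating_signal, attention_block and double_conv_layer are project helpers not shown here. As one example, the gating signal in attention U-Nets is commonly a 1*1 convolution that matches the gating tensor's channel count to the skip connection; a minimal sketch under that assumption:

def gating_signal(x, out_dim, batch_norm=False):
    # 1x1 convolution resizes the feature-map depth to out_dim
    x = layers.Conv2D(out_dim, (1, 1), padding='same')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    return layers.Activation('relu')(x)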
Code example #10
def VanillaUnet(num_class, img_shape):

    concat_axis = 3
    # input
    inputs = layers.Input(shape=img_shape)

    # Unet convolution block 1
    conv1 = layers.Conv2D(32, (3, 3),
                          activation='relu',
                          padding='same',
                          name='conv1_1')(inputs)
    conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

    # Unet convolution block 2
    conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

    # Unet convolution block 3
    conv3 = layers.Conv2D(128, (3, 3), activation='relu',
                          padding='same')(pool2)
    conv3 = layers.Conv2D(128, (3, 3), activation='relu',
                          padding='same')(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

    # Unet convolution block 4
    conv4 = layers.Conv2D(256, (3, 3), activation='relu',
                          padding='same')(pool3)
    conv4 = layers.Conv2D(256, (3, 3), activation='relu',
                          padding='same')(conv4)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

    # Unet convolution block 5
    conv5 = layers.Conv2D(512, (3, 3), activation='relu',
                          padding='same')(pool4)
    conv5 = layers.Conv2D(512, (3, 3), activation='relu',
                          padding='same')(conv5)

    # Unet up-sampling block 1; Concatenation with crop_conv4
    up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
    ch, cw = get_crop_shape(conv4, up_conv5)
    crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
    up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
    conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = layers.Conv2D(256, (3, 3), activation='relu',
                          padding='same')(conv6)

    # Unet up-sampling block 2; Concatenation with crop_conv3
    up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
    up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
    conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = layers.Conv2D(128, (3, 3), activation='relu',
                          padding='same')(conv7)

    # Unet up-sampling block 3; Concatenation with crop_conv2
    up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
    up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
    conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    # Unet up-sampling block 4; Concatenation with crop_conv1
    up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
    up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
    conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

    ch, cw = get_crop_shape(inputs, conv9)
    conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0],
                                                           cw[1])))(conv9)
    conv10 = layers.Conv2D(num_class, (1, 1))(conv9)

    model = models.Model(inputs=inputs, outputs=conv10)

    return model
Code example #11
File: UNetPACT.py  Project: zizou169/PA_Recons_Model
def UNet_PA(dropout_rate=0.0, batch_norm=True):
    '''
    U-Net construction.
    convolution: 3*3, SAME padding
    pooling: 2*2, VALID padding
    upsampling: by a factor of UP_SAMP_SIZE
    final convolution: 1*1
    :param dropout_rate: flag & rate of dropout;
            0 disables dropout, a positive value is used as the rate
    :param batch_norm: if True, use batch normalization
    :return: UNet model for PACT reconstruction
    '''
    # input data; INPUT_CHANNEL is the image depth
    inputs = layers.Input((INPUT_SIZE, INPUT_SIZE, INPUT_CHANNEL))
    axis = 3

    # Subsampling layers
    # double layer 1, convolution + pooling
    conv_128 = double_conv_layer(inputs, FILTER_SIZE, INPUT_SIZE, dropout_rate,
                                 batch_norm)
    pool_64 = layers.MaxPooling2D(pool_size=(2, 2))(conv_128)
    # double layer 2
    conv_64 = double_conv_layer(pool_64, 2 * FILTER_SIZE, INPUT_SIZE,
                                dropout_rate, batch_norm)
    pool_32 = layers.MaxPooling2D(pool_size=(2, 2))(conv_64)
    # double layer 3
    conv_32 = double_conv_layer(pool_32, 4 * FILTER_SIZE, INPUT_SIZE,
                                dropout_rate, batch_norm)
    pool_16 = layers.MaxPooling2D(pool_size=(2, 2))(conv_32)
    # double layer 4
    conv_16 = double_conv_layer(pool_16, 8 * FILTER_SIZE, INPUT_SIZE,
                                dropout_rate, batch_norm)
    pool_8 = layers.MaxPooling2D(pool_size=(2, 2))(conv_16)
    # double layer 5, convolution only
    conv_8 = double_conv_layer(pool_8, 16 * FILTER_SIZE, INPUT_SIZE,
                               dropout_rate, batch_norm)

    # Upsampling layers
    # double layer 6, upsampling + concatenation + convolution
    up_16 = layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                                data_format="channels_last")(conv_8)
    up_16 = layers.concatenate([up_16, conv_16], axis=axis)
    up_conv_16 = double_conv_layer(up_16, 8 * FILTER_SIZE, INPUT_SIZE,
                                   dropout_rate, batch_norm)
    # double layer 7
    up_32 = layers.concatenate([
        layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                            data_format="channels_last")(up_conv_16), conv_32
    ],
                               axis=axis)
    up_conv_32 = double_conv_layer(up_32, 4 * FILTER_SIZE, INPUT_SIZE,
                                   dropout_rate, batch_norm)
    # double layer 8
    up_64 = layers.concatenate([
        layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                            data_format="channels_last")(up_conv_32), conv_64
    ],
                               axis=axis)
    up_conv_64 = double_conv_layer(up_64, 2 * FILTER_SIZE, INPUT_SIZE,
                                   dropout_rate, batch_norm)
    # double layer 9
    up_128 = layers.concatenate([
        layers.UpSampling2D(size=(UP_SAMP_SIZE, UP_SAMP_SIZE),
                            data_format="channels_last")(up_conv_64), conv_128
    ],
                                axis=axis)
    up_conv_128 = double_conv_layer(up_128, FILTER_SIZE, INPUT_SIZE,
                                    dropout_rate, batch_norm)

    # 1*1 convolutional layers
    # valid padding
    # batch normalization
    # sigmoid nonlinear activation
    conv_final = layers.Conv2D(OUTPUT_MASK_CHANNEL,
                               kernel_size=(1, 1))(up_conv_128)
    conv_final = layers.BatchNormalization(axis=axis)(conv_final)
    conv_final = layers.Activation('sigmoid')(conv_final)

    # Model integration
    model = models.Model(inputs, conv_final, name="UNet")
    return model
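double_conv_layer is shared by this and the previous example but not shown. A common shape for such a helper is two stacked convolutions with optional batch normalization and a trailing dropout; a sketch under that assumption (the residual variant used in the attention model above would add a skip connection):

def double_conv_layer(x, filter_size, size, dropout_rate=0.0, batch_norm=False):
    # two stacked conv+(BN)+ReLU stages, with dropout applied at the end
    conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(x)
    if batch_norm:
        conv = layers.BatchNormalization(axis=3)(conv)
    conv = layers.Activation('relu')(conv)
    conv = layers.Conv2D(size, (filter_size, filter_size), padding='same')(conv)
    if batch_norm:
        conv = layers.BatchNormalization(axis=3)(conv)
    conv = layers.Activation('relu')(conv)
    if dropout_rate > 0:
        conv = layers.Dropout(dropout_rate)(conv)
    return conv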
Code example #12
# input layer
main_input = layers.Input(shape=(100,), dtype='int32', name='main_input')
# Embedding layer (W is assumed to be a pre-trained embedding matrix built elsewhere)
x = layers.Embedding(len(tokenizer.word_counts) + 1, 128, weights=[W],
                     input_length=100, trainable=True, name='word_embedding')(main_input)
# Conv layer
conv1 = layers.Conv1D(filters=32, kernel_size=2, strides=1, padding='same', activation='relu', use_bias=True)(x)
conv2 = layers.Conv1D(filters=32, kernel_size=3, strides=1, padding='same', activation='relu', use_bias=True)(x)
conv3 = layers.Conv1D(filters=32, kernel_size=4, strides=1, padding='same', activation='relu', use_bias=True)(x)

#Pool layer
pool1 = layers.MaxPooling1D(pool_size=2, strides=None, padding='valid')(conv1)
pool2 = layers.MaxPooling1D(pool_size=2, strides=None, padding='valid')(conv2)
pool3 = layers.MaxPooling1D(pool_size=2, strides=None, padding='valid')(conv3)

# concat & reshape to feed the Dense layer:
# three pooled maps of shape (50, 32) concatenated on axis 1 give (150, 32) -> 4800 values
concat = layers.concatenate([pool1, pool2, pool3], axis=1)
out1 = layers.Reshape((4800,))(concat)

# Dense layer
dense1 = layers.Dense(100, activation='relu')(out1)
# Softmax layer
main_output = layers.Dense(33, activation='softmax', name='main_output')(dense1)
# Model - put together Input & Output
model = models.Model(inputs=[main_input], outputs=[main_output])
# Compile Model
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
################################
# Train the model
################################
hist = model.fit(x_train, y_train, epochs=1, batch_size=512,
                 validation_data=(x_test, y_test))
################################
Code example #13
    def create_model(self, img_shape, num_class):

        concat_axis = 3
        inputs = layers.Input(shape=img_shape)

        conv1 = layers.Conv2D(32, (3, 3),
                              activation='relu',
                              padding='same',
                              name='conv1_1')(inputs)
        conv1 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(pool1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(pool2)
        conv3 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(pool3)
        conv4 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        ## Use dilated convolutions in place of the plain conv5 block
        x = pool4
        depth = 3  # number of dilated convolutions (dilation rates 1, 2, 4)
        dilated_layers = []
        mode = 'cascade'

        if mode == 'cascade':
            for i in range(depth):
                x = layers.Conv2D(512, (3, 3),
                                  activation='relu',
                                  padding='same',
                                  dilation_rate=2**i)(x)
                dilated_layers.append(x)
            conv5 = layers.add(dilated_layers)
        elif mode == 'parallel':  #"Atrous Spatial Pyramid Pooling"
            for i in range(depth):
                dilated_layers.append(
                    layers.Conv2D(512, (3, 3),
                                  activation='relu',
                                  padding='same',
                                  dilation_rate=2**i)(x))
            conv5 = layers.add(dilated_layers)

        # conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
        # conv5 = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(up6)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(up7)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(up8)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(up9)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(conv9)

        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0],
                                                               cw[1])))(conv9)
        conv10 = layers.Conv2D(num_class, (1, 1))(conv9)

        model = models.Model(inputs=inputs, outputs=conv10)

        return model
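A note on the two dilation modes above: 'cascade' chains the dilated convolutions (rates 1, 2, 4) so each one sees an already-dilated feature map, and the intermediate outputs are summed; 'parallel' applies all three rates to the same input and sums them, as in Atrous Spatial Pyramid Pooling. The cascaded receptive field grows quickly; plain arithmetic (not from the source):

# receptive field of cascaded 3x3 convolutions with dilation rates 1, 2, 4
rf = 1
for rate in (1, 2, 4):
    rf += 2 * rate  # a 3x3 conv with dilation d adds 2*d pixels of context
print(rf)  # 15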